Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/logs/demand-backup.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + create_infra demand-backup-10367 + local ns=demand-backup-10367 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.kline62KDE ++ mktemp + local LAST_ERR=/tmp/tmp.ZMdp148wal + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kline62KDE customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.ZMdp148wal + rm /tmp/tmp.kline62KDE /tmp/tmp.ZMdp148wal + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE No resources found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.tUpZJq90zU ++ mktemp + local LAST_ERR=/tmp/tmp.hpbD6ZomII + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tUpZJq90zU + cat /tmp/tmp.hpbD6ZomII + rm /tmp/tmp.tUpZJq90zU /tmp/tmp.hpbD6ZomII + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd 
perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.JoM7AfpISl ++ mktemp + local LAST_ERR=/tmp/tmp.KiumiRw2HM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JoM7AfpISl + cat /tmp/tmp.KiumiRw2HM + rm /tmp/tmp.JoM7AfpISl /tmp/tmp.KiumiRw2HM + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.lZ1JfbM4re ++ mktemp + local LAST_ERR=/tmp/tmp.SvzRAeGsi0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lZ1JfbM4re + cat /tmp/tmp.SvzRAeGsi0 + rm /tmp/tmp.lZ1JfbM4re /tmp/tmp.SvzRAeGsi0 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.TyQMw4J2bV ++ mktemp + local LAST_ERR=/tmp/tmp.LTUzq0uhH2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TyQMw4J2bV clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.LTUzq0uhH2 + rm /tmp/tmp.TyQMw4J2bV /tmp/tmp.LTUzq0uhH2 + return 0 + check_crd_for_deletion PR-1776-5e3425fc + local git_tag=PR-1776-5e3425fc ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1776-5e3425fc/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Um0SKiylrj +++ mktemp ++ local LAST_ERR=/tmp/tmp.Wi8J5bzDQu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.Um0SKiylrj ++ cat /tmp/tmp.Wi8J5bzDQu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ 
exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.Um0SKiylrj ++ cat /tmp/tmp.Wi8J5bzDQu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.Um0SKiylrj ++ cat /tmp/tmp.Wi8J5bzDQu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.Um0SKiylrj ++ cat /tmp/tmp.Wi8J5bzDQu Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.Um0SKiylrj /tmp/tmp.Wi8J5bzDQu ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.JUfD7OoFTC + local LAST_OUT=/tmp/tmp.4iVQ324RHM ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.MFt9HIPcMW + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.YcHjiwG1Xp + local exit_status=0 + local timeout=4 ++ seq 0 
2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4iVQ324RHM + cat /tmp/tmp.YcHjiwG1Xp + rm /tmp/tmp.4iVQ324RHM /tmp/tmp.YcHjiwG1Xp + return 0 namespace "demand-backup-4819" deleted namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JUfD7OoFTC namespace "psmdb-operator" deleted + cat /tmp/tmp.MFt9HIPcMW + rm /tmp/tmp.JUfD7OoFTC /tmp/tmp.MFt9HIPcMW + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ERlhSj7Sl4 ++ mktemp + local LAST_ERR=/tmp/tmp.RpAusAyvfy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ERlhSj7Sl4 + cat /tmp/tmp.RpAusAyvfy + rm /tmp/tmp.ERlhSj7Sl4 /tmp/tmp.RpAusAyvfy + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.63GZKN32J9 ++ mktemp + local LAST_ERR=/tmp/tmp.7rDrKHumdp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.63GZKN32J9 namespace/psmdb-operator created + cat /tmp/tmp.7rDrKHumdp + rm /tmp/tmp.63GZKN32J9 /tmp/tmp.7rDrKHumdp + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.G16Uj67jtc +++ mktemp ++ local LAST_ERR=/tmp/tmp.mM0jP9IE7z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G16Uj67jtc ++ cat /tmp/tmp.mM0jP9IE7z ++ rm /tmp/tmp.G16Uj67jtc /tmp/tmp.mM0jP9IE7z ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1776-5e3425fc-6-cluster10 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2FtQ3t7NeH ++ mktemp + local LAST_ERR=/tmp/tmp.qCV70D7vuJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1776-5e3425fc-6-cluster10 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2FtQ3t7NeH Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1776-5e3425fc-6-cluster10" modified. 
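Nearly every command in this log runs through a `kubectl_bin` retry wrapper; its control flow is visible in the trace (the `mktemp` LAST_OUT/LAST_ERR pair, the `seq 0 2` attempt loop, and the `sleep 0` / `sleep 4` / `sleep 8` back-off in the failed `crd/null` lookup above). The following is a minimal reconstruction inferred from that xtrace output, not the suite's actual source, which lives in the shared e2e-tests functions file and may differ in detail:

kubectl_bin() {
	local LAST_OUT LAST_ERR
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	local exit_status=0
	local timeout=4
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" -eq 0 ]; then
			break
		fi
		# back off 0s, 4s, 8s between attempts, matching the
		# "sleep 0 / sleep 4 / sleep 8" lines in the trace above
		sleep $((timeout * i))
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR" >&2
	rm -f "$LAST_OUT" "$LAST_ERR"
	return "$exit_status"
}

Capturing stdout and stderr to temp files and replaying them after the loop is why each step in the log ends with a pair of `cat /tmp/tmp.*` lines followed by `rm` and `return`.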
+ cat /tmp/tmp.qCV70D7vuJ + rm /tmp/tmp.2FtQ3t7NeH /tmp/tmp.qCV70D7vuJ + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.6iH5NbHFUI ++ mktemp + local LAST_ERR=/tmp/tmp.42WhA2r7TQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6iH5NbHFUI customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.42WhA2r7TQ + rm /tmp/tmp.6iH5NbHFUI /tmp/tmp.42WhA2r7TQ + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.1Mrf1AgWGq ++ mktemp + sed -e 's^namespace: .*^namespace: psmdb-operator^' + local LAST_ERR=/tmp/tmp.co1q9iZvPy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1Mrf1AgWGq clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.co1q9iZvPy + rm /tmp/tmp.1Mrf1AgWGq /tmp/tmp.co1q9iZvPy + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1776-5e3425fc") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.DZck378PK5 ++ mktemp + local LAST_ERR=/tmp/tmp.gYkvE9dZcL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DZck378PK5 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.gYkvE9dZcL + rm /tmp/tmp.DZck378PK5 /tmp/tmp.gYkvE9dZcL + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.quEQgx0DXd +++ mktemp ++ local LAST_ERR=/tmp/tmp.NgCKJMueC9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.quEQgx0DXd ++ cat /tmp/tmp.NgCKJMueC9 ++ rm /tmp/tmp.quEQgx0DXd /tmp/tmp.NgCKJMueC9 ++ return 0 + wait_pod percona-server-mongodb-operator-8f945b46d-smmnb + local pod=percona-server-mongodb-operator-8f945b46d-smmnb + set +o xtrace waiting for pod/percona-server-mongodb-operator-8f945b46d-smmnb to be ready.OK + create_namespace demand-backup-10367 + local namespace=demand-backup-10367 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-10367' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-10367 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-10367 --ignore-not-found + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + kubectl_bin get ns + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.xUXH2U2L8w + local LAST_OUT=/tmp/tmp.flocoUPwRL ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.5YoOxVnvNy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.zLnRWKZeDF + local exit_status=0 + local timeout=4 + awk '{print$1}' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-10367 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.flocoUPwRL + cat /tmp/tmp.5YoOxVnvNy + rm /tmp/tmp.flocoUPwRL /tmp/tmp.5YoOxVnvNy + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xUXH2U2L8w + cat /tmp/tmp.zLnRWKZeDF + rm /tmp/tmp.xUXH2U2L8w /tmp/tmp.zLnRWKZeDF + return 0 + kubectl_bin wait --for=delete namespace demand-backup-10367 ++ mktemp + local LAST_OUT=/tmp/tmp.dx8CCKqWyw ++ mktemp + local LAST_ERR=/tmp/tmp.EeOiEJ9Un1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace demand-backup-10367 namespace "gke-managed-cim" deleted namespace "gke-managed-system" deleted namespace "gmp-public" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dx8CCKqWyw + cat /tmp/tmp.EeOiEJ9Un1 + rm /tmp/tmp.dx8CCKqWyw /tmp/tmp.EeOiEJ9Un1 + return 0 + desc 'create namespace demand-backup-10367' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-10367 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-10367 ++ mktemp + local LAST_OUT=/tmp/tmp.wJeRgpd3De ++ mktemp + local LAST_ERR=/tmp/tmp.m69deuLn04 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-10367 namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wJeRgpd3De namespace/demand-backup-10367 created + cat /tmp/tmp.m69deuLn04 + rm /tmp/tmp.wJeRgpd3De /tmp/tmp.m69deuLn04 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.QpVCOXWgJz +++ mktemp ++ local LAST_ERR=/tmp/tmp.EhKP13Xvef ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QpVCOXWgJz ++ cat /tmp/tmp.EhKP13Xvef ++ rm /tmp/tmp.QpVCOXWgJz /tmp/tmp.EhKP13Xvef ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1776-5e3425fc-6-cluster10 --namespace=demand-backup-10367 ++ mktemp + local LAST_OUT=/tmp/tmp.fwu81qHRRZ ++ mktemp + local LAST_ERR=/tmp/tmp.2jMjt7QaHu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1776-5e3425fc-6-cluster10 --namespace=demand-backup-10367 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fwu81qHRRZ Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1776-5e3425fc-6-cluster10" modified. + cat /tmp/tmp.2jMjt7QaHu + rm /tmp/tmp.fwu81qHRRZ /tmp/tmp.2jMjt7QaHu + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Mon May 5 10:56:57 2025 NAMESPACE: demand-backup-10367 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-10367.svc.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-10367 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-10367 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-10367 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-10367 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JFSGUg7etm +++ mktemp ++ local LAST_ERR=/tmp/tmp.JD9ZZDVYCV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JFSGUg7etm ++ cat /tmp/tmp.JD9ZZDVYCV ++ rm /tmp/tmp.JFSGUg7etm /tmp/tmp.JD9ZZDVYCV ++ return 0 + MINIO_POD=minio-service-8967c7f7f-xt2rm + wait_pod minio-service-8967c7f7f-xt2rm + local pod=minio-service-8967c7f7f-xt2rm + set +o xtrace waiting for pod/minio-service-8967c7f7f-xt2rm to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-10367.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.tp3sNvIFGJ ++ mktemp + local LAST_ERR=/tmp/tmp.D0sWNDZleZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-10367.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tp3sNvIFGJ service/minio-service created + cat /tmp/tmp.D0sWNDZleZ + rm /tmp/tmp.tp3sNvIFGJ /tmp/tmp.D0sWNDZleZ + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.3Eg7fnslQV ++ mktemp + local LAST_ERR=/tmp/tmp.nu4IQdjYaB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3Eg7fnslQV make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.nu4IQdjYaB If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-10367 + rm /tmp/tmp.3Eg7fnslQV /tmp/tmp.nu4IQdjYaB + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + cluster=some-name-rs0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.HRoNiDemVE ++ mktemp + local LAST_ERR=/tmp/tmp.ITxDxUwT3j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HRoNiDemVE secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.ITxDxUwT3j + rm /tmp/tmp.HRoNiDemVE /tmp/tmp.ITxDxUwT3j + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.kTx4PVv6qx ++ mktemp + local LAST_ERR=/tmp/tmp.lZZ2CixnWA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kTx4PVv6qx secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.lZZ2CixnWA + rm /tmp/tmp.kTx4PVv6qx /tmp/tmp.lZZ2CixnWA + return 0 + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/some-name-rs0.yml + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/some-name-rs0.yml + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1776-5e3425fc"' + yq eval '(.spec | select(.image == null)).image = 
"perconalab/percona-server-mongodb-operator:main-mongod8.0"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jnwNCiV3Qt ++ mktemp + local LAST_ERR=/tmp/tmp.xPy8Tn1rHa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jnwNCiV3Qt perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.xPy8Tn1rHa + rm /tmp/tmp.jnwNCiV3Qt /tmp/tmp.xPy8Tn1rHa + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DwQaluA7Vs +++ mktemp ++ local LAST_ERR=/tmp/tmp.rdR8oq7ui6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DwQaluA7Vs ++ cat /tmp/tmp.rdR8oq7ui6 ++ rm /tmp/tmp.DwQaluA7Vs /tmp/tmp.rdR8oq7ui6 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M23tyxgrUX +++ mktemp ++ local LAST_ERR=/tmp/tmp.1yPzpAoV4X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M23tyxgrUX ++ cat /tmp/tmp.1yPzpAoV4X ++ rm /tmp/tmp.M23tyxgrUX /tmp/tmp.1yPzpAoV4X ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness... 
+ desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.lIZgOnVaLV/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-10367", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ckG5oxoZa3 ++ mktemp + local LAST_ERR=/tmp/tmp.4RD9p2eLAC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ckG5oxoZa3 + cat /tmp/tmp.4RD9p2eLAC + rm /tmp/tmp.ckG5oxoZa3 /tmp/tmp.4RD9p2eLAC + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.lIZgOnVaLV/statefulset_some-name-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.lIZgOnVaLV/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.lIZgOnVaLV/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/statefulset_some-name-rs0.yml /tmp/tmp.lIZgOnVaLV/statefulset_some-name-rs0.yml + desc 'create user' + set +o xtrace ----------------------------------------------------------------------------------- create user ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vnS7ZrO5gk +++ mktemp ++ local LAST_ERR=/tmp/tmp.vQQTl5OJM4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vnS7ZrO5gk ++ cat /tmp/tmp.vQQTl5OJM4 ++ rm /tmp/tmp.vnS7ZrO5gk /tmp/tmp.vQQTl5OJM4 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.10W6obxKxH ++ mktemp + local LAST_ERR=/tmp/tmp.0hxdhcAJpx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.10W6obxKxH Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c0c9a5bb-7106-4c0f-ab2b-e796efce0eae") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.0hxdhcAJpx + rm /tmp/tmp.10W6obxKxH /tmp/tmp.0hxdhcAJpx + return 0 + sleep 2 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eNnRJTXZWb +++ mktemp ++ local LAST_ERR=/tmp/tmp.jxcFOeztQu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eNnRJTXZWb ++ cat /tmp/tmp.jxcFOeztQu ++ rm /tmp/tmp.eNnRJTXZWb /tmp/tmp.jxcFOeztQu ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.2yL1Ne5XHM ++ mktemp + local LAST_ERR=/tmp/tmp.UfJ6TKpqjJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2yL1Ne5XHM Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f7b76fe5-bb3e-40db-a233-06e93a2cb9c8") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.UfJ6TKpqjJ + rm /tmp/tmp.2yL1Ne5XHM /tmp/tmp.UfJ6TKpqjJ + return 0 + run_mongo 'use myApp\n db.test2.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test2.insert({ x: 100501 })' + local 
uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V3j0oIsFjt +++ mktemp ++ local LAST_ERR=/tmp/tmp.AnoDCArzfa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V3j0oIsFjt ++ cat /tmp/tmp.AnoDCArzfa ++ rm /tmp/tmp.V3j0oIsFjt /tmp/tmp.AnoDCArzfa ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.C7rCIGGr51 ++ mktemp + local LAST_ERR=/tmp/tmp.FDRvrPRMcL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C7rCIGGr51 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3aae2256-8861-4e85-8a8d-df55f464d305") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.FDRvrPRMcL + rm /tmp/tmp.C7rCIGGr51 /tmp/tmp.FDRvrPRMcL + return 0 + run_mongo 'use myApp\n db.test3.insert({ x: 100502 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test3.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.409CQudHpZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.HrJYWN1d07 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.409CQudHpZ ++ cat /tmp/tmp.HrJYWN1d07 ++ rm /tmp/tmp.409CQudHpZ /tmp/tmp.HrJYWN1d07 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test3.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.l8dSGE7t8U ++ mktemp + local 
LAST_ERR=/tmp/tmp.ttdXBf8U8l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test3.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l8dSGE7t8U Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("26af14db-6f9f-4421-9576-dfa95cf3d8e8") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ttdXBf8U8l + rm /tmp/tmp.l8dSGE7t8U /tmp/tmp.ttdXBf8U8l + return 0 + custom_user_name=test1user + custom_role_name=test1role + run_mongo 'use myApp\n db.createUser({user: "test1user",pwd:"test1pass",roles:[]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.createUser({user: "test1user",pwd:"test1pass",roles:[]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j74K7wUhoP +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Spq0kNkEq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j74K7wUhoP ++ cat /tmp/tmp.4Spq0kNkEq ++ rm /tmp/tmp.j74K7wUhoP /tmp/tmp.4Spq0kNkEq ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.createUser({user: "test1user",pwd:"test1pass",roles:[]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JjXe1yOADF ++ mktemp + local LAST_ERR=/tmp/tmp.laszcRrx8H + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.createUser({user: "test1user",pwd:"test1pass",roles:[]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JjXe1yOADF Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : 
UUID("9d06e45c-6850-4bdb-94aa-7103e7e80088") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp Successfully added user: { "user" : "test1user", "roles" : [ ] } bye + cat /tmp/tmp.laszcRrx8H + rm /tmp/tmp.JjXe1yOADF /tmp/tmp.laszcRrx8H + return 0 + run_mongo 'use myApp\n db.createRole({"role":"test1role", privileges:[],roles:[]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.createRole({"role":"test1role", privileges:[],roles:[]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3YmfbKYMsU +++ mktemp ++ local LAST_ERR=/tmp/tmp.VCjw6xY9ap ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3YmfbKYMsU ++ cat /tmp/tmp.VCjw6xY9ap ++ rm /tmp/tmp.3YmfbKYMsU /tmp/tmp.VCjw6xY9ap ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.createRole({"role":"test1role", privileges:[],roles:[]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8HkP85kK3z ++ mktemp + local LAST_ERR=/tmp/tmp.CxNZIkl0ZY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.createRole({"role":"test1role", privileges:[],roles:[]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8HkP85kK3z Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("39f2f22e-9174-4a8b-97e7-1915b734d471") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp { "role" : "test1role", "privileges" : [ ], "roles" : [ ] } bye + cat /tmp/tmp.CxNZIkl0ZY + rm /tmp/tmp.8HkP85kK3z /tmp/tmp.CxNZIkl0ZY + return 0 + minikube_sleep + sleep_time=10 + [[ '' == 1 ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T10:59:38+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6r07WuC5hP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Py1WaBs7hN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6r07WuC5hP ++ cat /tmp/tmp.Py1WaBs7hN ++ rm /tmp/tmp.6r07WuC5hP /tmp/tmp.Py1WaBs7hN ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JNwJbdKNtS ++ mktemp + local LAST_ERR=/tmp/tmp.heiUXduVVI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JNwJbdKNtS + cat /tmp/tmp.heiUXduVVI + rm /tmp/tmp.JNwJbdKNtS /tmp/tmp.heiUXduVVI + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T10:59:42+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Njjqh4QvDy +++ mktemp ++ local LAST_ERR=/tmp/tmp.GNtGoQxv1T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Njjqh4QvDy ++ cat /tmp/tmp.GNtGoQxv1T ++ rm /tmp/tmp.Njjqh4QvDy /tmp/tmp.GNtGoQxv1T ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ViA4iLVsqL ++ mktemp + local LAST_ERR=/tmp/tmp.zHctUjnd2o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ViA4iLVsqL + cat /tmp/tmp.zHctUjnd2o + rm /tmp/tmp.ViA4iLVsqL /tmp/tmp.zHctUjnd2o + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T10:59:47+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A9AoP6m2Lx +++ mktemp ++ local LAST_ERR=/tmp/tmp.zTDRVy4oUh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A9AoP6m2Lx ++ cat /tmp/tmp.zTDRVy4oUh ++ rm /tmp/tmp.A9AoP6m2Lx /tmp/tmp.zTDRVy4oUh ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.C7ViItpMob ++ mktemp + local LAST_ERR=/tmp/tmp.cudhjBf535 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C7ViItpMob + cat /tmp/tmp.cudhjBf535 + rm /tmp/tmp.C7ViItpMob /tmp/tmp.cudhjBf535 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2025-05-05T10:59:07.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-05-05T10:59:06.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-05-05T10:59:12.000+0000 I listening for the commands + backup_name_minio=backup-minio + '[' -z '' ']' + backup_name_aws=backup-aws-s3 + backup_name_gcp=backup-gcp-cs + backup_name_azure=backup-azure-blob + desc 'run backups' + set +o xtrace ----------------------------------------------------------------------------------- run backups ----------------------------------------------------------------------------------- + '[' -z '' ']' + run_backup aws-s3 + local storage=aws-s3 + local backup_name=backup-aws-s3 + local type=logical + desc 'run backup backup-aws-s3' + set +o xtrace 
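# note: each run_backup below patches conf/backup-<storage>.yml with yq and pipes it to
# kubectl apply. Only the yq-patched fields are visible in this trace; the rest of the
# object (notably spec.clusterName) lives in the conf file, so this reconstruction is a
# sketch rather than the exact manifest:
kubectl apply -f - <<'EOF'
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBBackup
metadata:
  name: backup-aws-s3
spec:
  clusterName: some-name    # assumed from conf/backup-aws-s3.yml, not shown in the trace
  storageName: aws-s3       # set via yq, as traced below
  type: logical             # set via yq, as traced below
EOF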
----------------------------------------------------------------------------------- run backup backup-aws-s3 ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-aws-s3" | .spec.storageName = "aws-s3" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-aws-s3.yml ++ mktemp + local LAST_OUT=/tmp/tmp.W1W6C4qngE ++ mktemp + local LAST_ERR=/tmp/tmp.v4JOj42ESg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W1W6C4qngE perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 created + cat /tmp/tmp.v4JOj42ESg + rm /tmp/tmp.W1W6C4qngE /tmp/tmp.v4JOj42ESg + return 0 + run_backup gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + local type=logical + desc 'run backup backup-gcp-cs' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-gcp-cs ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-gcp-cs" | .spec.storageName = "gcp-cs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-gcp-cs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.HuPvOmFx7A ++ mktemp + local LAST_ERR=/tmp/tmp.cNhzXln9Jj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HuPvOmFx7A perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.cNhzXln9Jj + rm /tmp/tmp.HuPvOmFx7A /tmp/tmp.cNhzXln9Jj + return 0 + run_backup azure-blob + local storage=azure-blob + local backup_name=backup-azure-blob + local type=logical + desc 'run backup backup-azure-blob' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-azure-blob ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-azure-blob" | .spec.storageName = "azure-blob" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-azure-blob.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.xCDq6aX5wx ++ mktemp + local LAST_ERR=/tmp/tmp.4m1g5H2Nst + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xCDq6aX5wx perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob created + cat /tmp/tmp.4m1g5H2Nst + rm /tmp/tmp.xCDq6aX5wx /tmp/tmp.4m1g5H2Nst + return 0 + run_backup minio + local storage=minio + local backup_name=backup-minio + local type=logical + desc 'run backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.I2h2RwhDNf ++ mktemp + local LAST_ERR=/tmp/tmp.SmxByuRt1m + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I2h2RwhDNf perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.SmxByuRt1m + rm /tmp/tmp.I2h2RwhDNf /tmp/tmp.SmxByuRt1m + return 0 + '[' -z '' ']' + wait_backup backup-aws-s3 + local backup_name=backup-aws-s3 + local target_state=ready + set +o xtrace waiting for backup-aws-s3 to reach ready state.. + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + local target_state=ready + set +o xtrace waiting for backup-gcp-cs to reach ready state......... + wait_backup backup-azure-blob + local backup_name=backup-azure-blob + local target_state=ready + set +o xtrace waiting for backup-azure-blob to reach ready state..... + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state. + sleep 5 + '[' -z '' ']' + desc 'check backup and restore -- aws-s3' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- aws-s3 ----------------------------------------------------------------------------------- ++ get_backup_dest backup-aws-s3 ++ local backup_name=backup-aws-s3 ++ kubectl_bin get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|azure://||' ++ sed 's|s3://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oIU44Zj1ys +++ mktemp ++ local LAST_ERR=/tmp/tmp.KYK0721vc8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oIU44Zj1ys ++ cat /tmp/tmp.KYK0721vc8 ++ rm /tmp/tmp.oIU44Zj1ys /tmp/tmp.KYK0721vc8 ++ return 0 + backup_dest_aws=operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z + gunzip + curl -s https://s3.amazonaws.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z/rs0/myApp.test.gz + run_recovery_check backup-aws-s3 some-name-rs0 + local backup=backup-aws-s3 + local cluster=some-name-rs0 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yZPliftW2o +++ mktemp ++ local LAST_ERR=/tmp/tmp.kzqJcKZB9n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yZPliftW2o ++ cat /tmp/tmp.kzqJcKZB9n ++ rm /tmp/tmp.yZPliftW2o /tmp/tmp.kzqJcKZB9n ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nz7O69P1Wz ++ mktemp + local LAST_ERR=/tmp/tmp.4U6qJkaEIj + local 
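# note: wait_backup above is a poll on the backup object's state; an equivalent loop,
# using the same psmdb-backup resource the trace later queries for .status.destination:
until [ "$(kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.state}')" = "ready" ]; do
    echo -n .
    sleep 1
done
# once ready, get_backup_dest reads .status.destination and the dump is fetched over
# plain https (curl ... | gunzip) purely to prove a valid gzip landed in the bucket.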
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nz7O69P1Wz Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7469b4aa-6cbc-4c4e-befc-9f4804d50f9c") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.4U6qJkaEIj + rm /tmp/tmp.nz7O69P1Wz /tmp/tmp.4U6qJkaEIj + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-10367 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:00:59+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-10367 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lHxFUoDv7g +++ mktemp ++ local LAST_ERR=/tmp/tmp.oPoXuIC4zO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lHxFUoDv7g ++ cat /tmp/tmp.oPoXuIC4zO ++ rm /tmp/tmp.lHxFUoDv7g /tmp/tmp.oPoXuIC4zO ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ovArSO3Ek3 ++ mktemp + local LAST_ERR=/tmp/tmp.VjtidARHY6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ovArSO3Ek3 + cat 
/tmp/tmp.VjtidARHY6 + rm /tmp/tmp.ovArSO3Ek3 /tmp/tmp.VjtidARHY6 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find-2nd.json /tmp/tmp.lIZgOnVaLV/find-2nd + run_restore backup-aws-s3 + local backup_name=backup-aws-s3 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RB6yV2bHf9 ++ mktemp + local LAST_ERR=/tmp/tmp.hjp6MvBbwH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's/backupName:/backupName: backup-aws-s3/' + /usr/bin/sed -e 's/name:/name: restore-backup-aws-s3/' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RB6yV2bHf9 perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3 created + cat /tmp/tmp.hjp6MvBbwH + rm /tmp/tmp.RB6yV2bHf9 /tmp/tmp.hjp6MvBbwH + return 0 + wait_restore backup-aws-s3 some-name + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-aws-s3 to reach ready state........OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.80dRk96QHs +++ mktemp ++ local LAST_ERR=/tmp/tmp.CjpJIqmMmX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.80dRk96QHs ++ cat /tmp/tmp.CjpJIqmMmX ++ rm /tmp/tmp.80dRk96QHs /tmp/tmp.CjpJIqmMmX ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + simple_data_check some-name-rs0 3 0 + local cluster_name=some-name-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
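# note: run_restore above fills conf/restore.yml via sed (name: and backupName:); the
# applied object is, in sketch form, with clusterName again assumed from the conf file:
kubectl apply -f - <<'EOF'
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-aws-s3
spec:
  clusterName: some-name      # assumed, not visible in the trace
  backupName: backup-aws-s3   # filled in by sed, as traced above
EOF
# wait_restore then polls psmdb-restore/restore-backup-aws-s3 until ready, and
# wait_cluster_consistency re-reads `kubectl get psmdb some-name -o jsonpath={.status.state}`
# before the per-pod find checks below re-verify the pre-backup data set.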
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:01:25+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dX3GuMGcwb +++ mktemp ++ local LAST_ERR=/tmp/tmp.bYwqMhH3pn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dX3GuMGcwb ++ cat /tmp/tmp.bYwqMhH3pn ++ rm /tmp/tmp.dX3GuMGcwb /tmp/tmp.bYwqMhH3pn ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ehRD5z7u6y ++ mktemp + local LAST_ERR=/tmp/tmp.cvzTWTNH2S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ehRD5z7u6y + cat /tmp/tmp.cvzTWTNH2S + rm /tmp/tmp.ehRD5z7u6y /tmp/tmp.cvzTWTNH2S + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:01:28+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iUQIiOtM0o +++ mktemp ++ local LAST_ERR=/tmp/tmp.mUmO1ikyxM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iUQIiOtM0o ++ cat /tmp/tmp.mUmO1ikyxM ++ rm /tmp/tmp.iUQIiOtM0o /tmp/tmp.mUmO1ikyxM ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.p1sC2Xg22k ++ mktemp + local LAST_ERR=/tmp/tmp.Nf2UEtJWBr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p1sC2Xg22k + cat /tmp/tmp.Nf2UEtJWBr + rm /tmp/tmp.p1sC2Xg22k /tmp/tmp.Nf2UEtJWBr + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:01:32+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CtHGlNw6FM +++ mktemp ++ local LAST_ERR=/tmp/tmp.n4nEff4NkD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CtHGlNw6FM ++ cat /tmp/tmp.n4nEff4NkD ++ rm /tmp/tmp.CtHGlNw6FM /tmp/tmp.n4nEff4NkD ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zsNrjoRCEO ++ mktemp + local LAST_ERR=/tmp/tmp.urPiVnr0zy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zsNrjoRCEO + cat /tmp/tmp.urPiVnr0zy + rm /tmp/tmp.zsNrjoRCEO /tmp/tmp.urPiVnr0zy + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + desc 'check backup and restore -- gcp-cs' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- gcp-cs ----------------------------------------------------------------------------------- ++ get_backup_dest backup-gcp-cs ++ local backup_name=backup-gcp-cs ++ kubectl_bin get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ sed 's|azure://||' ++ sed 's|s3://||' ++ sed -e 's/.json$//' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hVTXdRL0ns +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ucm14c5kcU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hVTXdRL0ns ++ cat /tmp/tmp.Ucm14c5kcU ++ rm /tmp/tmp.hVTXdRL0ns /tmp/tmp.Ucm14c5kcU ++ return 0 + backup_dest_gcp=operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z + curl -s https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z/rs0/myApp.test.gz + gunzip + run_recovery_check backup-gcp-cs some-name-rs0 + 
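# note: get_backup_dest strips the scheme from .status.destination (sed 's|s3://||',
# 's|azure://||') so a public download URL can be built per provider: prefix with
# https://s3.amazonaws.com/ for aws-s3 and https://storage.googleapis.com/ for gcp-cs,
# while azure-blob destinations already arrive as full https URLs. A sketch of the aws
# case, matching the curl traced earlier:
dest="$(kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' \
    | sed -e 's|s3://||' -e 's/.json$//')"
curl -s "https://s3.amazonaws.com/${dest}/rs0/myApp.test.gz" | gunzip >/dev/null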
local backup=backup-gcp-cs + local cluster=some-name-rs0 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ibhnL2iHaw +++ mktemp ++ local LAST_ERR=/tmp/tmp.tpIDsYMF2Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ibhnL2iHaw ++ cat /tmp/tmp.tpIDsYMF2Q ++ rm /tmp/tmp.ibhnL2iHaw /tmp/tmp.tpIDsYMF2Q ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QRVlmgkQUf ++ mktemp + local LAST_ERR=/tmp/tmp.fA0gYPYVif + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QRVlmgkQUf Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2cb7140c-bd0f-4224-898a-12742112adb2") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.fA0gYPYVif + rm /tmp/tmp.QRVlmgkQUf /tmp/tmp.fA0gYPYVif + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-10367 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:01:42+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-10367 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aW4eJzETGG +++ mktemp ++ local LAST_ERR=/tmp/tmp.jVPseCTUD3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aW4eJzETGG ++ cat /tmp/tmp.jVPseCTUD3 ++ rm /tmp/tmp.aW4eJzETGG /tmp/tmp.jVPseCTUD3 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.d4YsXew9Pz ++ mktemp + local LAST_ERR=/tmp/tmp.PZI8O8Skic + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d4YsXew9Pz + cat /tmp/tmp.PZI8O8Skic + rm /tmp/tmp.d4YsXew9Pz /tmp/tmp.PZI8O8Skic + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find-2nd.json /tmp/tmp.lIZgOnVaLV/find-2nd + run_restore backup-gcp-cs + local backup_name=backup-gcp-cs + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-gcp-cs/' + local LAST_OUT=/tmp/tmp.odnSV3wtyn + /usr/bin/sed -e 's/backupName:/backupName: backup-gcp-cs/' ++ mktemp + local LAST_ERR=/tmp/tmp.QKdAfiwQz0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.odnSV3wtyn perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs created + cat /tmp/tmp.QKdAfiwQz0 + rm /tmp/tmp.odnSV3wtyn /tmp/tmp.QKdAfiwQz0 + return 0 + wait_restore backup-gcp-cs some-name + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-gcp-cs to reach ready state.....OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for 
cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AIVuZijxrb +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gn8zxShztR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AIVuZijxrb ++ cat /tmp/tmp.Gn8zxShztR ++ rm /tmp/tmp.AIVuZijxrb /tmp/tmp.Gn8zxShztR ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + simple_data_check some-name-rs0 3 0 + local cluster_name=some-name-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:02:09+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.qki8E71hk1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9S54aqwroP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qki8E71hk1 ++ cat /tmp/tmp.9S54aqwroP ++ rm /tmp/tmp.qki8E71hk1 /tmp/tmp.9S54aqwroP ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JJYW3rcFOi ++ mktemp + local LAST_ERR=/tmp/tmp.02PivvBfYf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JJYW3rcFOi + cat /tmp/tmp.02PivvBfYf + rm /tmp/tmp.JJYW3rcFOi /tmp/tmp.02PivvBfYf + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:02:13+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6PbIanfuir +++ mktemp ++ local LAST_ERR=/tmp/tmp.PmrrP0al0I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6PbIanfuir ++ cat /tmp/tmp.PmrrP0al0I ++ rm /tmp/tmp.6PbIanfuir /tmp/tmp.PmrrP0al0I ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8msq3WXYir ++ mktemp + local LAST_ERR=/tmp/tmp.kDg2thDBK2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8msq3WXYir + cat /tmp/tmp.kDg2thDBK2 + rm /tmp/tmp.8msq3WXYir /tmp/tmp.kDg2thDBK2 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:02:15+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.GW8BodzLyG +++ mktemp ++ local LAST_ERR=/tmp/tmp.0chb0xoOi5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GW8BodzLyG ++ cat /tmp/tmp.0chb0xoOi5 ++ rm /tmp/tmp.GW8BodzLyG /tmp/tmp.0chb0xoOi5 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Azvq6nQe0U ++ mktemp + local LAST_ERR=/tmp/tmp.Onj1JnS2Zi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Azvq6nQe0U + cat /tmp/tmp.Onj1JnS2Zi + rm /tmp/tmp.Azvq6nQe0U /tmp/tmp.Onj1JnS2Zi + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + desc 'check backup and restore -- azure-blob' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- azure-blob ----------------------------------------------------------------------------------- ++ get_backup_dest backup-azure-blob ++ local backup_name=backup-azure-blob ++ kubectl_bin get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' ++ sed 's|azure://||' ++ sed -e 's/.json$//' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DgqGgQCDOt +++ mktemp ++ local LAST_ERR=/tmp/tmp.1UNg6istwp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DgqGgQCDOt ++ cat /tmp/tmp.1UNg6istwp ++ rm /tmp/tmp.DgqGgQCDOt /tmp/tmp.1UNg6istwp ++ return 0 + backup_dest_azure=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z + curl -s 
https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z/rs0/myApp.test.gz + gunzip + run_recovery_check backup-azure-blob some-name-rs0 + local backup=backup-azure-blob + local cluster=some-name-rs0 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ctzzSwhS88 +++ mktemp ++ local LAST_ERR=/tmp/tmp.O1xlUL0VxL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ctzzSwhS88 ++ cat /tmp/tmp.O1xlUL0VxL ++ rm /tmp/tmp.ctzzSwhS88 /tmp/tmp.O1xlUL0VxL ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Qb9QQSaG46 ++ mktemp + local LAST_ERR=/tmp/tmp.9eu83zf2ol + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qb9QQSaG46 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6f2bb6d7-132c-4bee-b19c-f49320015664") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.9eu83zf2ol + rm /tmp/tmp.Qb9QQSaG46 /tmp/tmp.9eu83zf2ol + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-10367 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:02:24+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-10367 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.hLKrAoXZ69 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LwUDtRNHf6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hLKrAoXZ69 ++ cat /tmp/tmp.LwUDtRNHf6 ++ rm /tmp/tmp.hLKrAoXZ69 /tmp/tmp.LwUDtRNHf6 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3LzIgauMfs ++ mktemp + local LAST_ERR=/tmp/tmp.LEGoxcUkD0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3LzIgauMfs + cat /tmp/tmp.LEGoxcUkD0 + rm /tmp/tmp.3LzIgauMfs /tmp/tmp.LEGoxcUkD0 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find-2nd.json /tmp/tmp.lIZgOnVaLV/find-2nd + run_restore backup-azure-blob + local backup_name=backup-azure-blob + /usr/bin/sed -e 's/name:/name: restore-backup-azure-blob/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KMdoYwga4m ++ mktemp + local LAST_ERR=/tmp/tmp.MkFOeEF4RK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's/backupName:/backupName: backup-azure-blob/' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KMdoYwga4m perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob created + cat /tmp/tmp.MkFOeEF4RK + rm /tmp/tmp.KMdoYwga4m /tmp/tmp.MkFOeEF4RK + return 0 + wait_restore backup-azure-blob some-name + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-azure-blob to reach ready state.....OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for 
cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vjyU8VlTNW +++ mktemp ++ local LAST_ERR=/tmp/tmp.gB2Kt7QRGj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vjyU8VlTNW ++ cat /tmp/tmp.gB2Kt7QRGj ++ rm /tmp/tmp.vjyU8VlTNW /tmp/tmp.gB2Kt7QRGj ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + simple_data_check some-name-rs0 3 0 + local cluster_name=some-name-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:02:52+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0a4RwcZfn9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0iq0AloULv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0a4RwcZfn9 ++ cat /tmp/tmp.0iq0AloULv ++ rm /tmp/tmp.0a4RwcZfn9 /tmp/tmp.0iq0AloULv ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jsgtIvwsSg ++ mktemp + local LAST_ERR=/tmp/tmp.2pVW90AghR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jsgtIvwsSg + cat /tmp/tmp.2pVW90AghR + rm /tmp/tmp.jsgtIvwsSg /tmp/tmp.2pVW90AghR + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:02:56+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZmGx3fq50H +++ mktemp ++ local LAST_ERR=/tmp/tmp.BNeIsboYYU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZmGx3fq50H ++ cat /tmp/tmp.BNeIsboYYU ++ rm /tmp/tmp.ZmGx3fq50H /tmp/tmp.BNeIsboYYU ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IYU8kOUIJi ++ mktemp + local LAST_ERR=/tmp/tmp.Y0F83I9NYn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IYU8kOUIJi + cat /tmp/tmp.Y0F83I9NYn + rm /tmp/tmp.IYU8kOUIJi /tmp/tmp.Y0F83I9NYn + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:02:58+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wsM3KrSSn3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YZb4Re9pDW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wsM3KrSSn3 ++ cat /tmp/tmp.YZb4Re9pDW ++ rm /tmp/tmp.wsM3KrSSn3 /tmp/tmp.YZb4Re9pDW ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.HTL8JaX0Lw ++ mktemp + local LAST_ERR=/tmp/tmp.aRmPnPsxMy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HTL8JaX0Lw + cat /tmp/tmp.aRmPnPsxMy + rm /tmp/tmp.HTL8JaX0Lw /tmp/tmp.aRmPnPsxMy + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + desc 'check backup and restore -- minio' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- minio ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ sed 's|s3://||' ++ sed 's|azure://||' ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tBcxChcWtg +++ mktemp ++ local LAST_ERR=/tmp/tmp.NnYSnrHUJW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tBcxChcWtg ++ cat /tmp/tmp.NnYSnrHUJW ++ rm /tmp/tmp.tBcxChcWtg /tmp/tmp.NnYSnrHUJW ++ return 0 + backup_dest_minio=operator-testing/2025-05-05T11:00:12Z + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws 
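
Just before the minio check, get_backup_dest resolves where the backup landed by reading the backup CR's status and stripping the scheme prefix; everything that follows addresses that path directly. The resolution step in isolation (same sed chain as the trace):

  dest=$(kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' \
    | sed -e 's|s3://||' -e 's|azure://||' -e 's/\.json$//')
  echo "$dest"                  # operator-testing/2025-05-05T11:00:12Z in this run
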
--endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-05-05T11:00:12Z/rs0/ + grep myApp.test.gz ++ mktemp + local LAST_OUT=/tmp/tmp.oxg2XhzX3O ++ mktemp + local LAST_ERR=/tmp/tmp.VuI447ThDD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-05-05T11:00:12Z/rs0/ + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oxg2XhzX3O + cat /tmp/tmp.VuI447ThDD If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-10367 + rm /tmp/tmp.oxg2XhzX3O /tmp/tmp.VuI447ThDD + return 0 2025-05-05 11:00:16 55 myApp.test.gz + run_recovery_check backup-minio some-name-rs0 + local backup=backup-minio + local cluster=some-name-rs0 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qGPK6gX8QF +++ mktemp ++ local LAST_ERR=/tmp/tmp.vJLk8LWevn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qGPK6gX8QF ++ cat /tmp/tmp.vJLk8LWevn ++ rm /tmp/tmp.qGPK6gX8QF /tmp/tmp.vJLk8LWevn ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.MmWoaEvHES ++ mktemp + local LAST_ERR=/tmp/tmp.66WncGrjYZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MmWoaEvHES Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("46a497b3-e4a6-4151-81b9-969d0bc7db55") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.66WncGrjYZ + rm /tmp/tmp.MmWoaEvHES 
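
run_recovery_check, which starts here, deliberately dirties the data set before restoring: it inserts a marker document that did not exist when backup-minio was taken, confirms the extra row against the find-2nd golden file, applies the restore, and then expects the original find golden to match again, proving the restore actually rolled the collection back rather than leaving it untouched. The marker write, condensed (client_pod as in the earlier sketch):

  kubectl exec "$client_pod" -- bash -c \
    "printf 'use myApp\n db.test.insert({ x: 100501 })\n' | mongo \"mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false&replicaSet=rs0\""
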
/tmp/tmp.66WncGrjYZ + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-10367 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:03:12+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-10367 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XrgVlkgSbk +++ mktemp ++ local LAST_ERR=/tmp/tmp.CAlGIdXFCc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XrgVlkgSbk ++ cat /tmp/tmp.CAlGIdXFCc ++ rm /tmp/tmp.XrgVlkgSbk /tmp/tmp.CAlGIdXFCc ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.TYdeYAfXMw ++ mktemp + local LAST_ERR=/tmp/tmp.Dx0AJvoekB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TYdeYAfXMw + cat /tmp/tmp.Dx0AJvoekB + rm /tmp/tmp.TYdeYAfXMw /tmp/tmp.Dx0AJvoekB + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find-2nd.json /tmp/tmp.lIZgOnVaLV/find-2nd + run_restore backup-minio + local backup_name=backup-minio + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.TT9GFS8POe ++ mktemp + local LAST_ERR=/tmp/tmp.4gwB7uiJ7c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TT9GFS8POe perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.4gwB7uiJ7c + rm /tmp/tmp.TT9GFS8POe /tmp/tmp.4gwB7uiJ7c + return 0 + wait_restore backup-minio some-name + local backup_name=backup-minio + local cluster_name=some-name + local 
target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio to reach ready state......OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8BKEebMTOg +++ mktemp ++ local LAST_ERR=/tmp/tmp.tcj1uG9zL4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8BKEebMTOg ++ cat /tmp/tmp.tcj1uG9zL4 ++ rm /tmp/tmp.8BKEebMTOg /tmp/tmp.tcj1uG9zL4 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + simple_data_check some-name-rs0 3 0 + local cluster_name=some-name-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:03:36+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.NeRhMsm48f +++ mktemp ++ local LAST_ERR=/tmp/tmp.OHitQsI5X5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NeRhMsm48f ++ cat /tmp/tmp.OHitQsI5X5 ++ rm /tmp/tmp.NeRhMsm48f /tmp/tmp.OHitQsI5X5 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qNaFH3A1q9 ++ mktemp + local LAST_ERR=/tmp/tmp.RhspqDuNNm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + 
cat /tmp/tmp.qNaFH3A1q9 + cat /tmp/tmp.RhspqDuNNm + rm /tmp/tmp.qNaFH3A1q9 /tmp/tmp.RhspqDuNNm + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:03:39+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2qk6FNyBoc +++ mktemp ++ local LAST_ERR=/tmp/tmp.8RZJtOB0EK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2qk6FNyBoc ++ cat /tmp/tmp.8RZJtOB0EK ++ rm /tmp/tmp.2qk6FNyBoc /tmp/tmp.8RZJtOB0EK ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fQHW2POTvd ++ mktemp + local LAST_ERR=/tmp/tmp.A82BEt4VHt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fQHW2POTvd + cat /tmp/tmp.A82BEt4VHt + rm /tmp/tmp.fQHW2POTvd /tmp/tmp.A82BEt4VHt + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:03:42+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r8HKin7b4p +++ mktemp ++ local LAST_ERR=/tmp/tmp.srRPyKa5O2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r8HKin7b4p ++ cat /tmp/tmp.srRPyKa5O2 ++ rm /tmp/tmp.r8HKin7b4p /tmp/tmp.srRPyKa5O2 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.79zoTg6h45 ++ mktemp + local LAST_ERR=/tmp/tmp.HPY2VPNuIu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.79zoTg6h45 + cat /tmp/tmp.HPY2VPNuIu + rm /tmp/tmp.79zoTg6h45 /tmp/tmp.HPY2VPNuIu + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + run_mongo 'use myApp\n db.dropUser("test1user")' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.dropUser("test1user")' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RSnd8oddg6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q53kfOIkOh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RSnd8oddg6 ++ cat /tmp/tmp.Q53kfOIkOh ++ rm /tmp/tmp.RSnd8oddg6 /tmp/tmp.Q53kfOIkOh ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.dropUser("test1user")\n'\'' | mongo 
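
With the plain restore verified, the test removes the user and role it seeded earlier so the two selective-restore scenarios that follow have something to prove: with withUsersAndRoles=false, test1user and test1role must stay gone; with true, they must come back along with the data. The cleanup, condensed into one shell round-trip (a sketch; the trace issues two separate run_mongo calls):

  kubectl exec "$client_pod" -- bash -c \
    "printf 'use myApp\n db.dropUser(\"test1user\")\n db.dropRole(\"test1role\")\n' | mongo \"mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false&replicaSet=rs0\""
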
mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.G9Mi8ampMx ++ mktemp + local LAST_ERR=/tmp/tmp.4RqODjDfrJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.dropUser("test1user")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G9Mi8ampMx Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2fc4c34f-1437-46fd-89da-97d2f674ba9b") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.4RqODjDfrJ + rm /tmp/tmp.G9Mi8ampMx /tmp/tmp.4RqODjDfrJ + return 0 + run_mongo 'use myApp\n db.dropRole("test1role")' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.dropRole("test1role")' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tfAbNFUiWY +++ mktemp ++ local LAST_ERR=/tmp/tmp.eSc9CvDfLG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tfAbNFUiWY ++ cat /tmp/tmp.eSc9CvDfLG ++ rm /tmp/tmp.tfAbNFUiWY /tmp/tmp.eSc9CvDfLG ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.dropRole("test1role")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CGvDda1L4e ++ mktemp + local LAST_ERR=/tmp/tmp.r8JujaPhv0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.dropRole("test1role")\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CGvDda1L4e Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a2ab3002-3aef-44f5-9239-e56d7aa26d71") } Percona Server for 
MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.r8JujaPhv0 + rm /tmp/tmp.CGvDda1L4e /tmp/tmp.r8JujaPhv0 + return 0 + desc 'selective restore -- minio' + set +o xtrace ----------------------------------------------------------------------------------- selective restore -- minio ----------------------------------------------------------------------------------- + run_recovery_check_selective backup-minio some-name-rs0 myApp test2 false test1user test1role + local backup=backup-minio + local cluster=some-name-rs0 + local database=myApp + local collection=test2 + local with_users_and_roles=false + local username=test1user + local role=test1role + restore_name=restore-backup-minio-selective + run_mongo 'use myApp\n db.test2.drop()' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test2.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6pcNDeHGf7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TVJ84utdlR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6pcNDeHGf7 ++ cat /tmp/tmp.TVJ84utdlR ++ rm /tmp/tmp.6pcNDeHGf7 /tmp/tmp.TVJ84utdlR ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Ou4bSZs7oK ++ mktemp + local LAST_ERR=/tmp/tmp.67CcggTshC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ou4bSZs7oK Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e347c0fa-e9e6-45b7-9236-381e596fc7c4") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.67CcggTshC + rm /tmp/tmp.Ou4bSZs7oK /tmp/tmp.67CcggTshC + return 0 ++ collection_exists test2 ++ local collection=test2 ++ run_mongo 'use myApp\n JSON.stringify(db.getCollectionNames())' myApp:myPass@some-name-rs0.demand-backup-10367 '' '' --quiet ++ local 'command=use myApp\n JSON.stringify(db.getCollectionNames())' ++ grep -v 'switched to' ++ local uri=myApp:myPass@some-name-rs0.demand-backup-10367 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ jq 'index("test2") != null' +++ 
kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vCE5yqlzrI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dI8LvKqMQ3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vCE5yqlzrI +++ cat /tmp/tmp.dI8LvKqMQ3 +++ rm /tmp/tmp.vCE5yqlzrI /tmp/tmp.dI8LvKqMQ3 +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KaUIu055i5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HezyjhgKXH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KaUIu055i5 ++ cat /tmp/tmp.HezyjhgKXH ++ rm /tmp/tmp.KaUIu055i5 /tmp/tmp.HezyjhgKXH ++ return 0 + [[ false == \t\r\u\e ]] + yq '.metadata.name="restore-backup-minio-selective"' + kubectl_bin apply -f - + yq '.spec.backupName="backup-minio"' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore.yml + yq '.spec.selective.namespaces[0]="myApp.test"' + yq .spec.selective.withUsersAndRoles=false ++ mktemp + local LAST_OUT=/tmp/tmp.7JMs79GQ8Y ++ mktemp + local LAST_ERR=/tmp/tmp.qpud9xa5Bc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7JMs79GQ8Y perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-selective created + cat /tmp/tmp.qpud9xa5Bc + rm /tmp/tmp.7JMs79GQ8Y /tmp/tmp.qpud9xa5Bc + return 0 + wait_restore backup-minio-selective some-name + local backup_name=backup-minio-selective + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-selective to reach ready state......OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9rYnXluhOa +++ mktemp ++ local LAST_ERR=/tmp/tmp.To0VBAOSFF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9rYnXluhOa ++ cat /tmp/tmp.To0VBAOSFF ++ rm /tmp/tmp.9rYnXluhOa /tmp/tmp.To0VBAOSFF ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo ++ collection_exists test2 ++ local collection=test2 ++ run_mongo 'use myApp\n 
JSON.stringify(db.getCollectionNames())' myApp:myPass@some-name-rs0.demand-backup-10367 '' '' --quiet ++ grep -v 'switched to' ++ local 'command=use myApp\n JSON.stringify(db.getCollectionNames())' ++ local uri=myApp:myPass@some-name-rs0.demand-backup-10367 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ jq 'index("test2") != null' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XImd7DTgZg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JEHfZoyIFu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XImd7DTgZg +++ cat /tmp/tmp.JEHfZoyIFu +++ rm /tmp/tmp.XImd7DTgZg /tmp/tmp.JEHfZoyIFu +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gWgDgfEQmH +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ie7zlxKRuP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gWgDgfEQmH ++ cat /tmp/tmp.Ie7zlxKRuP ++ rm /tmp/tmp.gWgDgfEQmH /tmp/tmp.Ie7zlxKRuP ++ return 0 + [[ false == \t\r\u\e ]] + [[ false == \t\r\u\e ]] + [[ false == \f\a\l\s\e ]] ++ user_exists test1user ++ local username=test1user ++ run_mongo 'use myApp\n JSON.stringify(db.getUsers())' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 '' '' --quiet ++ grep -v 'switched to' ++ local 'command=use myApp\n JSON.stringify(db.getUsers())' ++ local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ jq 'any(.[]; ._id==myApp.test1user)' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.l9ADbJiKrK jq: error: myApp/0 is not defined at , line 1: any(.[]; ._id==myApp.test1user) jq: 1 compile error ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wF3ifliVaD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.l9ADbJiKrK +++ cat /tmp/tmp.wF3ifliVaD +++ rm /tmp/tmp.l9ADbJiKrK /tmp/tmp.wF3ifliVaD +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getUsers())\n'\'' | mongo 
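
Note the jq compile error in this block: the filter any(.[]; ._id==myApp.test1user) leaves the comparison value unquoted, so jq parses myApp as an undefined zero-arity function (hence "myApp/0 is not defined") rather than a string literal. Because the pipeline dies, user_exists (and role_exists below) return an empty string, and the subsequent [[ '' == \t\r\u\e ]] and [[ '' == \f\a\l\s\e ]] tests pass vacuously in both scenarios, so the assertions cannot distinguish "user absent" from "check broken". The intended filter is presumably:

  # as run (broken):  jq 'any(.[]; ._id==myApp.test1user)'
  # quoted literal:
  jq 'any(.[]; ._id=="myApp.test1user")'
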
mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rEQKmyz7PK +++ mktemp ++ local LAST_ERR=/tmp/tmp.CyfPTOP82C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getUsers())\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rEQKmyz7PK ++ cat /tmp/tmp.CyfPTOP82C ++ rm /tmp/tmp.rEQKmyz7PK /tmp/tmp.CyfPTOP82C ++ return 0 + [[ '' == \t\r\u\e ]] ++ role_exists test1role ++ local role=test1role ++ jq 'any(.[]; ._id==myApp.test1role)' ++ run_mongo 'use myApp\n JSON.stringify(db.getRoles())' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 '' '' --quiet ++ local 'command=use myApp\n JSON.stringify(db.getRoles())' ++ local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 jq: error: myApp/0 is not defined at , line 1: any(.[]; ._id==myApp.test1role) ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ grep -v 'switched to' jq: 1 compile error +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8tB3WBRwqB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AcFM7xPD3M +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8tB3WBRwqB +++ cat /tmp/tmp.AcFM7xPD3M +++ rm /tmp/tmp.8tB3WBRwqB /tmp/tmp.AcFM7xPD3M +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getRoles())\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FaZzARdiHf +++ mktemp ++ local LAST_ERR=/tmp/tmp.dUKO4g2MLu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getRoles())\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FaZzARdiHf ++ cat /tmp/tmp.dUKO4g2MLu ++ rm /tmp/tmp.FaZzARdiHf /tmp/tmp.dUKO4g2MLu ++ return 0 + [[ '' == \t\r\u\e ]] + kubectl_bin delete psmdb-restore restore-backup-minio-selective ++ mktemp + local LAST_OUT=/tmp/tmp.I3vCN4AXRQ ++ mktemp + local LAST_ERR=/tmp/tmp.uSb795j2iO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb-restore restore-backup-minio-selective + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I3vCN4AXRQ perconaservermongodbrestore.psmdb.percona.com "restore-backup-minio-selective" deleted + cat /tmp/tmp.uSb795j2iO + rm /tmp/tmp.I3vCN4AXRQ /tmp/tmp.uSb795j2iO + return 0 + run_mongo 'use myApp\n 
db.test2.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test2.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z8NpkG3Win +++ mktemp ++ local LAST_ERR=/tmp/tmp.UAXIj7lNC9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z8NpkG3Win ++ cat /tmp/tmp.UAXIj7lNC9 ++ rm /tmp/tmp.z8NpkG3Win /tmp/tmp.UAXIj7lNC9 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.KkQHVTeoa8 ++ mktemp + local LAST_ERR=/tmp/tmp.r1ER3EkMG7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KkQHVTeoa8 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("09e74f57-555b-4976-a9f9-0124534b4148") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.r1ER3EkMG7 + rm /tmp/tmp.KkQHVTeoa8 /tmp/tmp.r1ER3EkMG7 + return 0 + desc 'selective restore with users and roles -- minio' + set +o xtrace ----------------------------------------------------------------------------------- selective restore with users and roles -- minio ----------------------------------------------------------------------------------- + run_recovery_check_selective backup-minio some-name-rs0 myApp test2 true test1user test1role + local backup=backup-minio + local cluster=some-name-rs0 + local database=myApp + local collection=test2 + local with_users_and_roles=true + local username=test1user + local role=test1role + restore_name=restore-backup-minio-selective + run_mongo 'use myApp\n db.test2.drop()' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test2.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5lnjXch8kD +++ mktemp ++ local LAST_ERR=/tmp/tmp.tvvdxegS3e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set 
+e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5lnjXch8kD ++ cat /tmp/tmp.tvvdxegS3e ++ rm /tmp/tmp.5lnjXch8kD /tmp/tmp.tvvdxegS3e ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Yk2KTYZWLy ++ mktemp + local LAST_ERR=/tmp/tmp.fBe3T8gcRT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test2.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Yk2KTYZWLy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b3786855-aaf4-42ca-8c5c-1147ae29fbc6") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.fBe3T8gcRT + rm /tmp/tmp.Yk2KTYZWLy /tmp/tmp.fBe3T8gcRT + return 0 ++ collection_exists test2 ++ local collection=test2 ++ run_mongo 'use myApp\n JSON.stringify(db.getCollectionNames())' myApp:myPass@some-name-rs0.demand-backup-10367 '' '' --quiet ++ grep -v 'switched to' ++ local 'command=use myApp\n JSON.stringify(db.getCollectionNames())' ++ jq 'index("test2") != null' ++ local uri=myApp:myPass@some-name-rs0.demand-backup-10367 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0AEsLatOCM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P1FHgBz7d8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0AEsLatOCM +++ cat /tmp/tmp.P1FHgBz7d8 +++ rm /tmp/tmp.0AEsLatOCM /tmp/tmp.P1FHgBz7d8 +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.20Ep9Wi5Mb +++ mktemp ++ local LAST_ERR=/tmp/tmp.1baJiFtLwM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo 
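
collection_exists, evaluated before and after each selective restore, serializes the collection list to JSON inside the shell and lets jq test membership; unlike the user/role filters above, this jq expression is well-formed. Condensed (--quiet keeps the shell banner out of the JSON; "switched to" is the one stray line left to strip):

  kubectl exec "$client_pod" -- bash -c \
    "printf 'use myApp\n JSON.stringify(db.getCollectionNames())\n' | mongo \"mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false&replicaSet=rs0\" --quiet" \
    | grep -v 'switched to' | jq 'index("test2") != null'
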
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.20Ep9Wi5Mb ++ cat /tmp/tmp.1baJiFtLwM ++ rm /tmp/tmp.20Ep9Wi5Mb /tmp/tmp.1baJiFtLwM ++ return 0 + [[ false == \t\r\u\e ]] + yq '.metadata.name="restore-backup-minio-selective"' + yq '.spec.backupName="backup-minio"' + yq '.spec.selective.namespaces[0]="myApp.test"' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore.yml + yq .spec.selective.withUsersAndRoles=true + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.50hYGCk0wv ++ mktemp + local LAST_ERR=/tmp/tmp.UdoZ0GavAG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.50hYGCk0wv perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-selective created + cat /tmp/tmp.UdoZ0GavAG + rm /tmp/tmp.50hYGCk0wv /tmp/tmp.UdoZ0GavAG + return 0 + wait_restore backup-minio-selective some-name + local backup_name=backup-minio-selective + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-selective to reach ready state......OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DerOr8Hgi1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Sqj39HRPO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DerOr8Hgi1 ++ cat /tmp/tmp.1Sqj39HRPO ++ rm /tmp/tmp.DerOr8Hgi1 /tmp/tmp.1Sqj39HRPO ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo ++ collection_exists test2 ++ local collection=test2 ++ run_mongo 'use myApp\n JSON.stringify(db.getCollectionNames())' myApp:myPass@some-name-rs0.demand-backup-10367 '' '' --quiet ++ local 'command=use myApp\n JSON.stringify(db.getCollectionNames())' ++ grep -v 'switched to' ++ local uri=myApp:myPass@some-name-rs0.demand-backup-10367 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ jq 'index("test2") != null' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ccsed99NlJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TC2wfAdaD3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ccsed99NlJ +++ cat /tmp/tmp.TC2wfAdaD3 +++ rm /tmp/tmp.ccsed99NlJ /tmp/tmp.TC2wfAdaD3 +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' 
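
Both selective restores are generated the same way: conf/restore.yml is rewritten field by field with yq and piped to kubectl apply. Equivalent as a single yq v4 program (the trace chains four separate yq passes; note that .spec.selective.namespaces takes database.collection patterns such as "myApp.test", not Kubernetes namespaces):

  yq '.metadata.name = "restore-backup-minio-selective"
      | .spec.backupName = "backup-minio"
      | .spec.selective.namespaces[0] = "myApp.test"
      | .spec.selective.withUsersAndRoles = true' \
    /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore.yml \
    | kubectl apply -f -
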
+++ mktemp ++ local LAST_OUT=/tmp/tmp.hGoTw8F8NS +++ mktemp ++ local LAST_ERR=/tmp/tmp.gerATkuIiw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getCollectionNames())\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hGoTw8F8NS ++ cat /tmp/tmp.gerATkuIiw ++ rm /tmp/tmp.hGoTw8F8NS /tmp/tmp.gerATkuIiw ++ return 0 + [[ false == \t\r\u\e ]] + [[ true == \t\r\u\e ]] ++ user_exists test1user ++ local username=test1user ++ run_mongo 'use myApp\n JSON.stringify(db.getUsers())' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 '' '' --quiet ++ grep -v 'switched to' ++ jq 'any(.[]; ._id==myApp.test1user)' ++ local 'command=use myApp\n JSON.stringify(db.getUsers())' ++ local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local jq: error: myApp/0 is not defined at , line 1: any(.[]; ._id==myApp.test1user) jq: 1 compile error +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YfqhlnRh8d ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LwbL8Lpp1Y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YfqhlnRh8d +++ cat /tmp/tmp.LwbL8Lpp1Y +++ rm /tmp/tmp.YfqhlnRh8d /tmp/tmp.LwbL8Lpp1Y +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getUsers())\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LcbOGPwhi0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RGS59WoA2D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getUsers())\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LcbOGPwhi0 ++ cat /tmp/tmp.RGS59WoA2D ++ rm /tmp/tmp.LcbOGPwhi0 /tmp/tmp.RGS59WoA2D ++ return 0 + [[ '' == \f\a\l\s\e ]] ++ role_exists test1role ++ local role=test1role ++ run_mongo 'use myApp\n JSON.stringify(db.getRoles())' userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 '' '' --quiet ++ local 'command=use myApp\n JSON.stringify(db.getRoles())' ++ local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ jq 'any(.[]; ._id==myApp.test1role)' ++ grep -v 'switched to' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.twZQlx3TmG ++++ mktemp jq: error: myApp/0 is not defined at , line 1: any(.[]; ._id==myApp.test1role) jq: 1 
compile error +++ local LAST_ERR=/tmp/tmp.19VwRHlRtX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.twZQlx3TmG +++ cat /tmp/tmp.19VwRHlRtX +++ rm /tmp/tmp.twZQlx3TmG /tmp/tmp.19VwRHlRtX +++ return 0 ++ local client_container=psmdb-client-66f577db5f-k6mmp ++ local mongo_flag=--quiet ++ [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367 == *cfg* ]] ++ replica_set=rs0 ++ kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getRoles())\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qAWez3SgCu +++ mktemp ++ local LAST_ERR=/tmp/tmp.IEIa9qWaIX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n JSON.stringify(db.getRoles())\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qAWez3SgCu ++ cat /tmp/tmp.IEIa9qWaIX ++ rm /tmp/tmp.qAWez3SgCu /tmp/tmp.IEIa9qWaIX ++ return 0 + [[ '' == \f\a\l\s\e ]] + desc 'restore from backup source, with storageName -- minio' + set +o xtrace ----------------------------------------------------------------------------------- restore from backup source, with storageName -- minio ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed 's|azure://||' ++ sed 's|s3://||' ++ sed -e 's/.json$//' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0T7vyxeuy2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FBGuKvbfla ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0T7vyxeuy2 ++ cat /tmp/tmp.FBGuKvbfla ++ rm /tmp/tmp.0T7vyxeuy2 /tmp/tmp.FBGuKvbfla ++ return 0 + backup_dest_minio=operator-testing/2025-05-05T11:00:12Z + run_recovery_check_bkp_source backup-minio operator-testing/2025-05-05T11:00:12Z some-name-rs0 backup-minio-source-0 + local backup=backup-minio + local backup_dest=operator-testing/2025-05-05T11:00:12Z + local cluster=some-name-rs0 + local source=backup-minio-source-0 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.56PSEzDktS +++ mktemp ++ local LAST_ERR=/tmp/tmp.8CaJHdpH2Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.56PSEzDktS ++ cat 
/tmp/tmp.8CaJHdpH2Z ++ rm /tmp/tmp.56PSEzDktS /tmp/tmp.8CaJHdpH2Z ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.LS0nYd4KlD ++ mktemp + local LAST_ERR=/tmp/tmp.zUZZ7ykV9q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LS0nYd4KlD Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a2837894-5061-4772-ba56-ffd14498e6ed") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.zUZZ7ykV9q + rm /tmp/tmp.LS0nYd4KlD /tmp/tmp.zUZZ7ykV9q + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-10367 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:05:13+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-10367 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local LAST_OUT=/tmp/tmp.QxMMGBC6sP +++ mktemp ++ local LAST_ERR=/tmp/tmp.bNSZZnZjk4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QxMMGBC6sP ++ cat /tmp/tmp.bNSZZnZjk4 ++ rm /tmp/tmp.QxMMGBC6sP /tmp/tmp.bNSZZnZjk4 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tcGMt0kf7u ++ mktemp + local LAST_ERR=/tmp/tmp.sjGfSPjbu5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tcGMt0kf7u + cat /tmp/tmp.sjGfSPjbu5 + rm /tmp/tmp.tcGMt0kf7u /tmp/tmp.sjGfSPjbu5 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find-2nd.json /tmp/tmp.lIZgOnVaLV/find-2nd + run_restore_backupsource backup-minio-source-0 operator-testing/2025-05-05T11:00:12Z + local backupName=backup-minio-source-0 + local backupDest=operator-testing/2025-05-05T11:00:12Z + local storageName= + desc 'run restore restore-backup-minio-source-0 from backup backup-minio-source-0 destination is operator-testing/2025-05-05T11:00:12Z' + set +o xtrace ----------------------------------------------------------------------------------- run restore restore-backup-minio-source-0 from backup backup-minio-source-0 destination is operator-testing/2025-05-05T11:00:12Z ----------------------------------------------------------------------------------- + '[' -z '' ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore-backupsource.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-source-0/' + /usr/bin/sed -e 's|BACKUP-NAME|operator-testing/2025-05-05T11:00:12Z|' + kubectl_bin apply -f - + /usr/bin/sed -e /storageName/d ++ mktemp + local LAST_OUT=/tmp/tmp.EH0xoscNHg ++ mktemp + local LAST_ERR=/tmp/tmp.dXauHrN735 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' 
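
# NOTE: every command in this trace runs through kubectl_bin, a retry wrapper
# around kubectl that captures stdout/stderr into mktemp files and retries up
# to three times. A minimal sketch of the pattern as it can be reconstructed
# from the trace; the function body and the backoff are assumptions, not the
# verified helper source:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp); LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                 # up to three attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break   # success: keep captured output
            sleep "$((timeout * i))"            # assumed backoff; the trace only shows 'sleep 0' (i=0)
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
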
+ break + cat /tmp/tmp.EH0xoscNHg perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-source-0 created + cat /tmp/tmp.dXauHrN735 + rm /tmp/tmp.EH0xoscNHg /tmp/tmp.dXauHrN735 + return 0 + return + wait_restore backup-minio-source-0 some-name + local backup_name=backup-minio-source-0 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-source-0 to reach ready state......OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Sye2stKyPr +++ mktemp ++ local LAST_ERR=/tmp/tmp.jFfxvsrHjL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Sye2stKyPr ++ cat /tmp/tmp.jFfxvsrHjL ++ rm /tmp/tmp.Sye2stKyPr /tmp/tmp.jFfxvsrHjL ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + simple_data_check some-name-rs0 3 0 + local cluster_name=some-name-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:05:40+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.35KMuqCj0U +++ mktemp ++ local LAST_ERR=/tmp/tmp.eV89f5CVFj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.35KMuqCj0U ++ cat /tmp/tmp.eV89f5CVFj ++ rm /tmp/tmp.35KMuqCj0U /tmp/tmp.eV89f5CVFj ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DOBno108bB ++ mktemp + local LAST_ERR=/tmp/tmp.xLWXCuNAnP + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DOBno108bB + cat /tmp/tmp.xLWXCuNAnP + rm /tmp/tmp.DOBno108bB /tmp/tmp.xLWXCuNAnP + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:05:42+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EUDRRUn2jX +++ mktemp ++ local LAST_ERR=/tmp/tmp.lbaaXVsIMd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EUDRRUn2jX ++ cat /tmp/tmp.lbaaXVsIMd ++ rm /tmp/tmp.EUDRRUn2jX /tmp/tmp.lbaaXVsIMd ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.AOcZdOuZS7 ++ mktemp + local LAST_ERR=/tmp/tmp.YljIxpXyiV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AOcZdOuZS7 + cat /tmp/tmp.YljIxpXyiV + rm /tmp/tmp.AOcZdOuZS7 /tmp/tmp.YljIxpXyiV + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + 
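
# NOTE: each per-pod check in this block is compare_mongo_cmd: run the query
# through run_mongo, drop shell/driver noise with egrep -v, normalize
# ObjectIds and pod ordinals with sed, then diff against a golden file. A
# minimal sketch of that pipeline; $test_dir and $tmp_dir stand in for the
# workspace and mktemp paths seen in the trace, the other variables are the
# locals the trace itself sets:

    run_mongo "use ${database}\n db.${collection}.find()" "$uri" mongodb "$suffix" \
        | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' \
        | /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        > "$tmp_dir/find${postfix}"
    diff -u "$test_dir/compare/find${postfix}.json" "$tmp_dir/find${postfix}"
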
local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:05:46+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Paw3jMohyH +++ mktemp ++ local LAST_ERR=/tmp/tmp.z8kT0lMMU6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Paw3jMohyH ++ cat /tmp/tmp.z8kT0lMMU6 ++ rm /tmp/tmp.Paw3jMohyH /tmp/tmp.z8kT0lMMU6 ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zoMDyMQrCc ++ mktemp + local LAST_ERR=/tmp/tmp.BvmzXEQixp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zoMDyMQrCc + cat /tmp/tmp.BvmzXEQixp + rm /tmp/tmp.zoMDyMQrCc /tmp/tmp.BvmzXEQixp + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + desc 'restore from backup source, no storageName -- minio' + set +o xtrace ----------------------------------------------------------------------------------- restore from backup source, no storageName -- minio ----------------------------------------------------------------------------------- ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|azure://||' ++ sed 's|s3://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o2i7OTZ86h +++ mktemp ++ local LAST_ERR=/tmp/tmp.iFmDQ5mnoT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o2i7OTZ86h ++ cat /tmp/tmp.iFmDQ5mnoT ++ rm /tmp/tmp.o2i7OTZ86h /tmp/tmp.iFmDQ5mnoT ++ return 0 + backup_dest_minio=operator-testing/2025-05-05T11:00:12Z + run_recovery_check_bkp_source 
backup-minio operator-testing/2025-05-05T11:00:12Z some-name-rs0 backup-minio-source-1 + local backup=backup-minio + local backup_dest=operator-testing/2025-05-05T11:00:12Z + local cluster=some-name-rs0 + local source=backup-minio-source-1 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-10367 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HeS1uZlDMq +++ mktemp ++ local LAST_ERR=/tmp/tmp.nfrcp2eogX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HeS1uZlDMq ++ cat /tmp/tmp.nfrcp2eogX ++ rm /tmp/tmp.HeS1uZlDMq /tmp/tmp.nfrcp2eogX ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.2JFqayrQTR ++ mktemp + local LAST_ERR=/tmp/tmp.ponkNiOuYQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2JFqayrQTR Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e77d5577-4ba4-4ceb-b772-1248f56073f7") } Percona Server for MongoDB server version: v8.0.8-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ponkNiOuYQ + rm /tmp/tmp.2JFqayrQTR /tmp/tmp.ponkNiOuYQ + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-10367 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:05:53+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-10367 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tmQ1wqKx4D +++ mktemp ++ local LAST_ERR=/tmp/tmp.AMYocUXECl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tmQ1wqKx4D ++ cat /tmp/tmp.AMYocUXECl ++ rm /tmp/tmp.tmQ1wqKx4D /tmp/tmp.AMYocUXECl ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yF9p0MdIFt ++ mktemp + local LAST_ERR=/tmp/tmp.7yWBO3FIbr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yF9p0MdIFt + cat /tmp/tmp.7yWBO3FIbr + rm /tmp/tmp.yF9p0MdIFt /tmp/tmp.7yWBO3FIbr + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find-2nd.json /tmp/tmp.lIZgOnVaLV/find-2nd + run_restore_backupsource backup-minio-source-1 operator-testing/2025-05-05T11:00:12Z + local backupName=backup-minio-source-1 + local backupDest=operator-testing/2025-05-05T11:00:12Z + local storageName= + desc 'run restore restore-backup-minio-source-1 from backup backup-minio-source-1 destination is operator-testing/2025-05-05T11:00:12Z' + set +o xtrace ----------------------------------------------------------------------------------- run restore restore-backup-minio-source-1 from backup backup-minio-source-1 destination is operator-testing/2025-05-05T11:00:12Z ----------------------------------------------------------------------------------- + '[' -z '' ']' + /usr/bin/sed -e 's/name:/name: restore-backup-minio-source-1/' + /usr/bin/sed -e 's|BACKUP-NAME|operator-testing/2025-05-05T11:00:12Z|' + kubectl_bin apply -f - + /usr/bin/sed -e /storageName/d + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/restore-backupsource.yml ++ mktemp + local LAST_OUT=/tmp/tmp.2z04hmgTEP ++ mktemp + local LAST_ERR=/tmp/tmp.ZFxWmX9pg0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' 
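
# NOTE: run_restore_backupsource builds the restore manifest by sed-templating
# the restore-backupsource.yml fixture: set the CR name, substitute the
# BACKUP-NAME placeholder with the backup destination, and (when no
# storageName is given, as in this run) drop the storageName line so the
# operator resolves the source purely from backupSource. A minimal sketch of
# the pipeline; the function framing is an assumption reconstructed from the
# trace:

    run_restore_backupsource() {
        local backupName=$1 backupDest=$2 storageName=$3
        cat "$test_dir/conf/restore-backupsource.yml" \
            | /usr/bin/sed -e "s/name:/name: restore-${backupName}/" \
            | /usr/bin/sed -e "s|BACKUP-NAME|${backupDest}|" \
            | /usr/bin/sed -e '/storageName/d' \
            | kubectl apply -f -                # no-storageName branch of this run
    }
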
+ break + cat /tmp/tmp.2z04hmgTEP perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-source-1 created + cat /tmp/tmp.ZFxWmX9pg0 + rm /tmp/tmp.2z04hmgTEP /tmp/tmp.ZFxWmX9pg0 + return 0 + return + wait_restore backup-minio-source-1 some-name + local backup_name=backup-minio-source-1 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace waiting psmdb-restore/restore-backup-minio-source-1 to reach ready state......OK + '[' 1 -eq 1 ']' + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XVqPwMFbHr +++ mktemp ++ local LAST_ERR=/tmp/tmp.QHBiwGdJ3T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XVqPwMFbHr ++ cat /tmp/tmp.QHBiwGdJ3T ++ rm /tmp/tmp.XVqPwMFbHr /tmp/tmp.QHBiwGdJ3T ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + simple_data_check some-name-rs0 3 0 + local cluster_name=some-name-rs0 + let last_pod=3-1 + local isSharded=0 + local cluster_pfx= + '[' 0 -eq 1 ']' ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:06:20+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.ZkYF0BKbv2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0QeXU5imCy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZkYF0BKbv2 ++ cat /tmp/tmp.0QeXU5imCy ++ rm /tmp/tmp.ZkYF0BKbv2 /tmp/tmp.0QeXU5imCy ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hzxXCakMpB ++ mktemp + local LAST_ERR=/tmp/tmp.ifdM3FsZc3 + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hzxXCakMpB + cat /tmp/tmp.ifdM3FsZc3 + rm /tmp/tmp.hzxXCakMpB /tmp/tmp.ifdM3FsZc3 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:06:22+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KVuFB19cnm +++ mktemp ++ local LAST_ERR=/tmp/tmp.DkUIiCskFt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KVuFB19cnm ++ cat /tmp/tmp.DkUIiCskFt ++ rm /tmp/tmp.KVuFB19cnm /tmp/tmp.DkUIiCskFt ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0t5TseH7k8 ++ mktemp + local LAST_ERR=/tmp/tmp.3EKL9OwHSp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0t5TseH7k8 + cat /tmp/tmp.3EKL9OwHSp + rm /tmp/tmp.0t5TseH7k8 /tmp/tmp.3EKL9OwHSp + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + for i in '$(seq 0 $last_pod)' + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + 
local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-05-05T11:06:26+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A6CE91p4Q1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.a1w9Fzyo9L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A6CE91p4Q1 ++ cat /tmp/tmp.a1w9Fzyo9L ++ rm /tmp/tmp.A6CE91p4Q1 /tmp/tmp.a1w9Fzyo9L ++ return 0 + local client_container=psmdb-client-66f577db5f-k6mmp + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bYuGZEl9lf ++ mktemp + local LAST_ERR=/tmp/tmp.T8W8Io86d7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-k6mmp -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-10367.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bYuGZEl9lf + cat /tmp/tmp.T8W8Io86d7 + rm /tmp/tmp.bYuGZEl9lf /tmp/tmp.T8W8Io86d7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/compare/find.json /tmp/tmp.lIZgOnVaLV/find + desc 'delete backup and check if it is removed from bucket -- minio' + set +o xtrace ----------------------------------------------------------------------------------- delete backup and check if it is removed from bucket -- minio ----------------------------------------------------------------------------------- + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.JQXqDspcde ++ mktemp + local LAST_ERR=/tmp/tmp.lzk71PoPJQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JQXqDspcde perconaservermongodbbackup.psmdb.percona.com "backup-aws-s3" deleted perconaservermongodbbackup.psmdb.percona.com "backup-azure-blob" deleted perconaservermongodbbackup.psmdb.percona.com "backup-gcp-cs" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted + cat /tmp/tmp.lzk71PoPJQ + rm /tmp/tmp.JQXqDspcde /tmp/tmp.lzk71PoPJQ + return 0 ++ grep -c 
operator-testing/2025-05-05T11:00:12Z ++ cat ++ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZDogCQaf16 +++ mktemp ++ local LAST_ERR=/tmp/tmp.o6mp4NFmmp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZDogCQaf16 ++ cat /tmp/tmp.o6mp4NFmmp ++ rm /tmp/tmp.ZDogCQaf16 /tmp/tmp.o6mp4NFmmp ++ return 0 + backup_exists=0 + [[ 0 -eq 1 ]] + '[' -z '' ']' + check_backup_deletion https://s3.amazonaws.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z aws-s3 + path=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z + storage_name=aws-s3 + retry=0 ++ curl -sw '%{http_code}' -o /dev/null https://s3.amazonaws.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z + [[ 403 -eq 403 ]] + check_backup_deletion https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z gcp-cs + path=https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z + storage_name=gcp-cs + retry=0 ++ curl -sw '%{http_code}' -o /dev/null https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z + [[ 404 -eq 403 ]] ++ curl -sw '%{http_code}' -o /dev/null https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z + [[ 404 -eq 404 ]] + check_backup_deletion https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z azure-blob + path=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z + storage_name=azure-blob + retry=0 ++ curl -sw '%{http_code}' -o /dev/null https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z + [[ 404 -eq 403 ]] ++ curl -sw '%{http_code}' -o /dev/null https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z + [[ 404 -eq 404 ]] + desc 'checking backup deletion without cr' + set +o xtrace ----------------------------------------------------------------------------------- checking backup deletion without cr ----------------------------------------------------------------------------------- + run_backup minio + local storage=minio + local backup_name=backup-minio + local type=logical + desc 'run backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.pyGn9706aN ++ mktemp + local LAST_ERR=/tmp/tmp.ZosmrDNEg3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + 
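
# NOTE: run_backup templates the per-storage backup CR with yq and applies it;
# only the name, storageName and type are overridden in the fixture. A minimal
# sketch, assuming the fixture path convention seen in the trace and that the
# type argument defaults to logical:

    run_backup() {
        local storage=$1
        local backup_name="backup-${storage}"
        local type=${2:-logical}
        yq eval ".metadata.name = \"${backup_name}\" | .spec.storageName = \"${storage}\" | .spec.type = \"${type}\"" \
            "$test_dir/conf/backup-${storage}.yml" \
            | kubectl apply -f -
    }
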
kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pyGn9706aN perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.ZosmrDNEg3 + rm /tmp/tmp.pyGn9706aN /tmp/tmp.ZosmrDNEg3 + return 0 + '[' -z '' ']' + run_backup aws-s3 + local storage=aws-s3 + local backup_name=backup-aws-s3 + local type=logical + desc 'run backup backup-aws-s3' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-aws-s3 ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-aws-s3" | .spec.storageName = "aws-s3" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-aws-s3.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KFKBD5nWSy ++ mktemp + local LAST_ERR=/tmp/tmp.kKUkeqVP9H + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KFKBD5nWSy perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 created + cat /tmp/tmp.kKUkeqVP9H + rm /tmp/tmp.KFKBD5nWSy /tmp/tmp.kKUkeqVP9H + return 0 + run_backup gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + local type=logical + desc 'run backup backup-gcp-cs' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-gcp-cs ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-gcp-cs" | .spec.storageName = "gcp-cs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-gcp-cs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.vrdKGG5k1P ++ mktemp + local LAST_ERR=/tmp/tmp.AsHLycYSvG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vrdKGG5k1P perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.AsHLycYSvG + rm /tmp/tmp.vrdKGG5k1P /tmp/tmp.AsHLycYSvG + return 0 + run_backup azure-blob + local storage=azure-blob + local backup_name=backup-azure-blob + local type=logical + desc 'run backup backup-azure-blob' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-azure-blob ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-azure-blob" | .spec.storageName = "azure-blob" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/e2e-tests/demand-backup/conf/backup-azure-blob.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RC5fQMX8ud ++ mktemp + local LAST_ERR=/tmp/tmp.tvaShSB9GY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RC5fQMX8ud perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob created + cat /tmp/tmp.tvaShSB9GY + rm /tmp/tmp.RC5fQMX8ud /tmp/tmp.tvaShSB9GY + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state. 
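
# NOTE: the dotted 'waiting for ... to reach ready state' lines come from
# wait_backup, which polls the psmdb-backup CR until its status matches the
# target; each printed dot is one poll. A minimal sketch of the loop (the
# jsonpath field and the one-second interval are assumptions; the real loop
# is hidden behind 'set +o xtrace'):

    wait_backup() {
        local backup_name=$1 target_state=${2:-ready}
        echo -n "waiting for ${backup_name} to reach ${target_state} state"
        until [ "$(kubectl get psmdb-backup "${backup_name}" -o 'jsonpath={.status.state}')" = "${target_state}" ]; do
            echo -n .
            sleep 1
        done
        echo
    }
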
+ '[' -z '' ']' + wait_backup backup-aws-s3 + local backup_name=backup-aws-s3 + local target_state=ready + set +o xtrace waiting for backup-aws-s3 to reach ready state..... + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + local target_state=ready + set +o xtrace waiting for backup-gcp-cs to reach ready state........ + wait_backup backup-azure-blob + local backup_name=backup-azure-blob + local target_state=ready + set +o xtrace waiting for backup-azure-blob to reach ready state. + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.RX4YDVNmFh ++ mktemp + local LAST_ERR=/tmp/tmp.mt9Y12y0hr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RX4YDVNmFh perconaservermongodb.psmdb.percona.com "some-name" deleted + cat /tmp/tmp.mt9Y12y0hr + rm /tmp/tmp.RX4YDVNmFh /tmp/tmp.mt9Y12y0hr + return 0 + sleep 60 + desc 'delete backup and check if it is removed from bucket -- minio' + set +o xtrace ----------------------------------------------------------------------------------- delete backup and check if it is removed from bucket -- minio ----------------------------------------------------------------------------------- + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.edsscwZN6Z ++ mktemp + local LAST_ERR=/tmp/tmp.EL8wXAUHGq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.edsscwZN6Z perconaservermongodbbackup.psmdb.percona.com "backup-aws-s3" deleted perconaservermongodbbackup.psmdb.percona.com "backup-azure-blob" deleted perconaservermongodbbackup.psmdb.percona.com "backup-gcp-cs" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted + cat /tmp/tmp.EL8wXAUHGq + rm /tmp/tmp.edsscwZN6Z /tmp/tmp.EL8wXAUHGq + return 0 ++ grep -c operator-testing/2025-05-05T11:00:12Z ++ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ ++ cat +++ mktemp ++ local LAST_OUT=/tmp/tmp.gh1ctns5kK +++ mktemp ++ local LAST_ERR=/tmp/tmp.66RJRdP5Il ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gh1ctns5kK ++ cat /tmp/tmp.66RJRdP5Il If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-10367 ++ rm /tmp/tmp.gh1ctns5kK /tmp/tmp.66RJRdP5Il ++ return 0 + backup_exists=0 + [[ 0 -eq 1 ]] + '[' -z '' ']' + check_backup_deletion https://s3.amazonaws.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z aws-s3 + path=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z + storage_name=aws-s3 + retry=0 ++ curl -sw '%{http_code}' -o /dev/null https://s3.amazonaws.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:00Z + [[ 403 -eq 403 ]] + check_backup_deletion https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z gcp-cs + path=https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z + storage_name=gcp-cs + retry=0 ++ curl -sw '%{http_code}' -o /dev/null https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z + [[ 404 -eq 403 ]] ++ curl -sw '%{http_code}' -o /dev/null https://storage.googleapis.com/operator-testing/psmdb-demand-backup/2025-05-05T11:00:22Z + [[ 404 -eq 404 ]] + check_backup_deletion https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z azure-blob + path=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z + storage_name=azure-blob + retry=0 ++ curl -sw '%{http_code}' -o /dev/null https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z + [[ 404 -eq 403 ]] ++ curl -sw '%{http_code}' -o /dev/null https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup/2025-05-05T11:00:35Z + [[ 404 -eq 404 ]] + desc 'check for passwords leak' + set +o xtrace ----------------------------------------------------------------------------------- check for passwords leak ----------------------------------------------------------------------------------- + check_passwords_leak + local secrets + local passwords + local pods ++ jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value' ++ kubectl_bin get secrets -o json +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ezcua0ezDL +++ mktemp ++ local LAST_ERR=/tmp/tmp.hIZY50IuNC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ezcua0ezDL ++ cat /tmp/tmp.hIZY50IuNC ++ rm /tmp/tmp.Ezcua0ezDL /tmp/tmp.hIZY50IuNC ++ return 0 + secrets='YmFja3VwMTIzNDU2Iw== YmFja3VwMTIzNDU2JTIz Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2Iw== Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo secrets=YmFja3VwMTIzNDU2Iw== YmFja3VwMTIzNDU2JTIz Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2Iw== Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 secrets=YmFja3VwMTIzNDU2Iw== YmFja3VwMTIzNDU2JTIz Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 
Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2Iw== Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo + passwords='backup123456# backup123456%23 clusterAdmin123456 clusterAdmin123456 clusterMonitor123456 clusterMonitor123456 databaseAdmin123456 databaseAdmin123456 userAdmin123456 userAdmin123456 backup123456# clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2Iw== YmFja3VwMTIzNDU2JTIz Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2Iw== Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo passwords=backup123456# backup123456%23 clusterAdmin123456 clusterAdmin123456 clusterMonitor123456 clusterMonitor123456 databaseAdmin123456 databaseAdmin123456 userAdmin123456 userAdmin123456 backup123456# clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2Iw== YmFja3VwMTIzNDU2JTIz Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2Iw== Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 passwords=backup123456# backup123456%23 clusterAdmin123456 clusterAdmin123456 clusterMonitor123456 clusterMonitor123456 databaseAdmin123456 databaseAdmin123456 userAdmin123456 userAdmin123456 backup123456# clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2Iw== YmFja3VwMTIzNDU2JTIz Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2Iw== Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ kubectl_bin get pods -o name ++ awk -F / '{print $2}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ye1xEQiwyV +++ mktemp ++ local LAST_ERR=/tmp/tmp.t3mrVWH5Ea ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ye1xEQiwyV ++ cat /tmp/tmp.t3mrVWH5Ea ++ rm /tmp/tmp.Ye1xEQiwyV /tmp/tmp.t3mrVWH5Ea ++ return 0 + pods='minio-service-8967c7f7f-xt2rm psmdb-client-66f577db5f-k6mmp' + echo pods=minio-service-8967c7f7f-xt2rm psmdb-client-66f577db5f-k6mmp pods=minio-service-8967c7f7f-xt2rm 
psmdb-client-66f577db5f-k6mmp + collect_logs demand-backup-10367 + local containers + local count + NS=demand-backup-10367 + for p in '$pods' ++ kubectl_bin -n demand-backup-10367 get pod minio-service-8967c7f7f-xt2rm -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.28EkFt9VTt +++ mktemp ++ local LAST_ERR=/tmp/tmp.cwxOi9gVce ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n demand-backup-10367 get pod minio-service-8967c7f7f-xt2rm -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.28EkFt9VTt ++ cat /tmp/tmp.cwxOi9gVce ++ rm /tmp/tmp.28EkFt9VTt /tmp/tmp.cwxOi9gVce ++ return 0 + containers=minio + for c in '$containers' + [[ minio =~ pmm ]] + kubectl_bin -n demand-backup-10367 logs minio-service-8967c7f7f-xt2rm -c minio ++ mktemp + local LAST_OUT=/tmp/tmp.AbuNVWZnMJ ++ mktemp + local LAST_ERR=/tmp/tmp.npiHCzZ4eg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n demand-backup-10367 logs minio-service-8967c7f7f-xt2rm -c minio + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AbuNVWZnMJ + cat /tmp/tmp.npiHCzZ4eg + rm /tmp/tmp.AbuNVWZnMJ /tmp/tmp.npiHCzZ4eg + return 0 + echo logs saved in: /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt logs saved in: /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456# /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456%23 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456# /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 
/tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2Iw== /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2JTIz /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2Iw== /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-minio-service-8967c7f7f-xt2rm-minio.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + for p in '$pods' ++ kubectl_bin -n demand-backup-10367 get pod psmdb-client-66f577db5f-k6mmp -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ 
local LAST_OUT=/tmp/tmp.cgaJUMtoSi +++ mktemp ++ local LAST_ERR=/tmp/tmp.iqqKXoDOfu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n demand-backup-10367 get pod psmdb-client-66f577db5f-k6mmp -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cgaJUMtoSi ++ cat /tmp/tmp.iqqKXoDOfu ++ rm /tmp/tmp.cgaJUMtoSi /tmp/tmp.iqqKXoDOfu ++ return 0 + containers=psmdb-client + for c in '$containers' + [[ psmdb-client =~ pmm ]] + kubectl_bin -n demand-backup-10367 logs psmdb-client-66f577db5f-k6mmp -c psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.f2XVH6NWyz ++ mktemp + local LAST_ERR=/tmp/tmp.fj1jR3HLm5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n demand-backup-10367 logs psmdb-client-66f577db5f-k6mmp -c psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f2XVH6NWyz + cat /tmp/tmp.fj1jR3HLm5 + rm /tmp/tmp.f2XVH6NWyz /tmp/tmp.fj1jR3HLm5 + return 0 + echo logs saved in: /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt logs saved in: /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456# /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456%23 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456# /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 
/tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2Iw== /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2JTIz /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2Iw== /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-psmdb-client-66f577db5f-k6mmp-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + '[' -n psmdb-operator ']' ++ awk -F / '{print $2}' ++ kubectl_bin -n psmdb-operator get pods -o name +++ mktemp ++ local LAST_OUT=/tmp/tmp.16HppabLSI +++ mktemp ++ local LAST_ERR=/tmp/tmp.VLNTBXRFAx ++ 
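
Every kubectl call in this log runs through the harness's kubectl_bin wrapper, which is why each invocation is bracketed by two mktemp calls and a seq 0 2 loop: stdout and stderr are captured to temp files and the command is retried up to three times with a growing sleep. A reconstruction inferred from the trace (the real helper lives in the repo's e2e-tests functions; details such as the exact backoff formula are assumptions):

kubectl_bin() {
    local LAST_OUT LAST_ERR
    local exit_status=0
    local timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                  # up to three attempts
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR"
            sleep $((timeout * i))           # matches the observed 0s, 4s, 8s pauses
        else
            break
        fi
    done
    cat "$LAST_OUT"                          # replay captured output into the log
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}
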
local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.16HppabLSI ++ cat /tmp/tmp.VLNTBXRFAx ++ rm /tmp/tmp.16HppabLSI /tmp/tmp.VLNTBXRFAx ++ return 0 + pods=percona-server-mongodb-operator-8f945b46d-smmnb + collect_logs psmdb-operator + local containers + local count + NS=psmdb-operator + for p in '$pods' ++ kubectl_bin -n psmdb-operator get pod percona-server-mongodb-operator-8f945b46d-smmnb -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sfLAoLXGyU +++ mktemp ++ local LAST_ERR=/tmp/tmp.18wd24Tw02 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pod percona-server-mongodb-operator-8f945b46d-smmnb -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sfLAoLXGyU ++ cat /tmp/tmp.18wd24Tw02 ++ rm /tmp/tmp.sfLAoLXGyU /tmp/tmp.18wd24Tw02 ++ return 0 + containers=percona-server-mongodb-operator + for c in '$containers' + [[ percona-server-mongodb-operator =~ pmm ]] + kubectl_bin -n psmdb-operator logs percona-server-mongodb-operator-8f945b46d-smmnb -c percona-server-mongodb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.REJzzDLV4r ++ mktemp + local LAST_ERR=/tmp/tmp.ZiMK2rYcDg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n psmdb-operator logs percona-server-mongodb-operator-8f945b46d-smmnb -c percona-server-mongodb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.REJzzDLV4r + cat /tmp/tmp.ZiMK2rYcDg + rm /tmp/tmp.REJzzDLV4r /tmp/tmp.ZiMK2rYcDg + return 0 + echo logs saved in: /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt logs saved in: /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456# /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456%23 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 
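
collect_logs, seen here for the psmdb-operator namespace, walks every pod, lists its containers with a JSONPath query, and dumps each container's log to a file that the password scan then inspects. A sketch of the loop as it appears in the trace; $pods and $tmp_dir come from the caller, the file naming is taken from the 'logs saved in:' lines, and skipping PMM containers is an assumption based on the regex test:

collect_logs() {
    local containers
    local count
    NS=$1
    for p in $pods; do
        containers=$(kubectl_bin -n "$NS" get pod "$p" -o 'jsonpath={.spec.containers[*].name}')
        for c in $containers; do
            [[ $c =~ pmm ]] && continue     # pmm sidecars appear to be special-cased
            kubectl_bin -n "$NS" logs "$p" -c "$c" >"$tmp_dir/logs_output-$p-$c.txt"
            echo "logs saved in: $tmp_dir/logs_output-$p-$c.txt"
        done
    done
}
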
/tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456# /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2Iw== /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2JTIz /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c 
--fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2Iw== /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.lIZgOnVaLV/logs_output-percona-server-mongodb-operator-8f945b46d-smmnb-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + destroy demand-backup-10367 + local namespace=demand-backup-10367 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.9iwcmDWCm0 ++ mktemp + local LAST_ERR=/tmp/tmp.sv8qiphdep + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9iwcmDWCm0 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.sv8qiphdep + rm /tmp/tmp.9iwcmDWCm0 /tmp/tmp.sv8qiphdep + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" 
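
The second half of each scan repeats the check with base64-encoded forms of the same passwords, since Kubernetes Secrets store values base64-encoded and a leaked Secret dump would contain the encoded string rather than the plaintext. The encoded values in the trace decode exactly as expected:

printf '%s' 'backup123456#' | base64        # YmFja3VwMTIzNDU2Iw==
printf '%s' 'backup123456%23' | base64      # YmFja3VwMTIzNDU2JTIz (URL-encoded '#')
printf '%s' 'clusterAdmin123456' | base64   # Y2x1c3RlckFkbWluMTIzNDU2
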
| grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.vRemRMWoQm ++ mktemp + local LAST_ERR=/tmp/tmp.F8GuB9pyEz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vRemRMWoQm + cat /tmp/tmp.F8GuB9pyEz + rm /tmp/tmp.vRemRMWoQm /tmp/tmp.F8GuB9pyEz + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.P82xoMwCVh ++ mktemp + local LAST_ERR=/tmp/tmp.kU8B85qbIv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P82xoMwCVh + cat /tmp/tmp.kU8B85qbIv + rm /tmp/tmp.P82xoMwCVh /tmp/tmp.kU8B85qbIv + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.sTm1sPmDM7 ++ mktemp + local LAST_ERR=/tmp/tmp.fTKr3y9DP8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sTm1sPmDM7 + cat /tmp/tmp.fTKr3y9DP8 + rm /tmp/tmp.sTm1sPmDM7 /tmp/tmp.fTKr3y9DP8 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.5mgkzAKRRD ++ mktemp + 
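
delete_crd, run here a second time from destroy(), first deletes the CRDs with --wait=false, then clears finalizers on any custom resources that might remain so the deletion cannot hang, and finally waits for each CRD object to disappear. The 'resource type' errors above are expected once a CRD is already gone; the trailing ':' swallows them. The stray 'kubectl patch ... -n sh' error is an xargs artifact: with empty input, GNU xargs still runs the command once with no arguments, and 'sh -c' then sets $0 to 'sh'. The per-CRD step, approximately:

for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
    # emit "<namespace> <name> ..." rows and patch each CR's finalizers empty,
    # so half-deleted resources cannot block CRD removal
    kubectl get "$crd_name" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch '"$crd_name"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
        || :
    # block until the CRD itself is gone
    kubectl_bin wait --for=delete crd "$crd_name"
done
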
local LAST_ERR=/tmp/tmp.p4VdrWxmfM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1776/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5mgkzAKRRD clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.p4VdrWxmfM + rm /tmp/tmp.5mgkzAKRRD /tmp/tmp.p4VdrWxmfM + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.RlAnAh9Qon ++ mktemp + local LAST_ERR=/tmp/tmp.HYN1xm3JEV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.RlAnAh9Qon + cat /tmp/tmp.HYN1xm3JEV Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.RlAnAh9Qon + cat /tmp/tmp.HYN1xm3JEV
[same "Error from server (NotFound)" messages as above]
+ sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.RlAnAh9Qon + cat /tmp/tmp.HYN1xm3JEV
[same "Error from server (NotFound)" messages as above]
+ sleep 8 + cat /tmp/tmp.RlAnAh9Qon + cat /tmp/tmp.HYN1xm3JEV
[same "Error from server (NotFound)" messages as above]
+ rm /tmp/tmp.RlAnAh9Qon /tmp/tmp.HYN1xm3JEV + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.lIZgOnVaLV + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-10367 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.cox6uuiM1A + local LAST_OUT=/tmp/tmp.SOwtmwpz7v ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.JkOwYVvJm1 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.3nZQLZo1Jw + local exit_status=0 + local timeout=4 ++ seq 0 2
++ seq 0 2 + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + set +e + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + kubectl delete --grace-period=0 --force=true namespace demand-backup-10367
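
Note: the long run of NotFound errors above is expected in this run. The cleanup pass deletes the cert-manager manifest unconditionally, cert-manager was never installed here, and the caller deliberately swallows the resulting failure (the `+ return 1` followed by `+ true` in the trace). A minimal sketch of a quieter, idempotent form of the same cleanup, assuming only kubectl's standard delete flag:

  # Idempotent manifest cleanup: --ignore-not-found suppresses NotFound errors
  # for objects that no longer exist; '|| true' keeps the harness going either way.
  kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.yaml \
      --ignore-not-found=true || true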
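
Note: the interleaved '+ local LAST_OUT=...' / '++ seq 0 2' lines at the end come from two kubectl_bin invocations force-deleting the psmdb-operator and demand-backup-10367 namespaces in parallel. A minimal reconstruction of the kubectl_bin retry wrapper, inferred from this trace alone (the real definition lives in the suite's shared functions file and may differ; the '-n 1' operand in the retry test is copied verbatim from the trace, its source variable is not visible here):

  kubectl_bin() {
      local LAST_OUT
      local LAST_ERR
      LAST_OUT=$(mktemp)
      LAST_ERR=$(mktemp)
      local exit_status=0
      local timeout=4               # declared in the trace; unused in the visible excerpt
      for i in $(seq 0 2); do       # up to three attempts
          set +e
          kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
          exit_status=$?
          set -e
          if [ "$exit_status" != 0 -a -n 1 ]; then
              cat "$LAST_OUT"       # on failure, surface output and retry
              cat "$LAST_ERR"
              sleep "$i"            # matches the 'sleep 0' seen on the first retry
          else
              break
          fi
      done
      cat "$LAST_OUT"
      cat "$LAST_ERR"
      rm "$LAST_OUT" "$LAST_ERR"
      return $exit_status
  }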