Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/logs/demand-backup-snapshot.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 ++ deploy_volume_snapshot_class ++ local snapshot_class_file ++ local k8s_env +++ detect_k8s_env +++ local provider=unknown +++ kubectl_bin api-resources +++ grep -q openshift.io ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xuRFeMaUNN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zUoCWTj53d +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl api-resources +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xuRFeMaUNN +++ cat /tmp/tmp.zUoCWTj53d +++ rm /tmp/tmp.xuRFeMaUNN /tmp/tmp.zUoCWTj53d +++ return 0 +++ kubectl_bin get nodes -o 'jsonpath={.items[0].metadata.labels}' +++ grep -q eks.amazonaws.com +++ kubectl_bin get nodes -o 'jsonpath={.items[0].metadata.labels}' +++ grep -q cloud.google.com/gke +++ provider=gke +++ echo gke ++ k8s_env=gke ++ case "$k8s_env" in ++ snapshot_class_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/gke-volume-snapshot-class.yaml ++ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/gke-volume-snapshot-class.yaml +++ mktemp ++ local LAST_OUT=/tmp/tmp.dQxIo0bHCb +++ mktemp ++ local LAST_ERR=/tmp/tmp.oPjas2Euxv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/gke-volume-snapshot-class.yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dQxIo0bHCb volumesnapshotclass.snapshot.storage.k8s.io/gke-snapshot-class unchanged ++ cat /tmp/tmp.oPjas2Euxv ++ rm /tmp/tmp.dQxIo0bHCb /tmp/tmp.oPjas2Euxv ++ return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/gke-volume-snapshot-class.yaml + VOLUME_SNAPSHOT_CLASS=gke-snapshot-class + create_infra demand-backup-snapshot-17683 + local ns=demand-backup-snapshot-17683 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.KN0kLPdMFF ++ mktemp + local LAST_ERR=/tmp/tmp.rqhbGJGhmJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KN0kLPdMFF customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.rqhbGJGhmJ + rm /tmp/tmp.KN0kLPdMFF /tmp/tmp.rqhbGJGhmJ + return 0 ++ yq eval .metadata.name 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.HEL5XHg3Ux ++ mktemp + local LAST_ERR=/tmp/tmp.zWzOEZv4Wz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HEL5XHg3Ux + cat /tmp/tmp.zWzOEZv4Wz + rm /tmp/tmp.HEL5XHg3Ux /tmp/tmp.zWzOEZv4Wz + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.FNJIiLWGGu ++ mktemp + local LAST_ERR=/tmp/tmp.hxa6GZoRHi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FNJIiLWGGu + cat /tmp/tmp.hxa6GZoRHi + rm /tmp/tmp.FNJIiLWGGu /tmp/tmp.hxa6GZoRHi + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dlIUqDa9yj ++ mktemp + local LAST_ERR=/tmp/tmp.fkKSipFdvF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dlIUqDa9yj + cat /tmp/tmp.fkKSipFdvF + rm /tmp/tmp.dlIUqDa9yj /tmp/tmp.fkKSipFdvF + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found ++ 
mktemp + local LAST_OUT=/tmp/tmp.ZENNJ7k4fa ++ mktemp + local LAST_ERR=/tmp/tmp.AnE1x5SWrs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZENNJ7k4fa clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.AnE1x5SWrs + rm /tmp/tmp.ZENNJ7k4fa /tmp/tmp.AnE1x5SWrs + return 0 + check_crd_for_deletion PR-2219-fe9d8c05 + local git_tag=PR-2219-fe9d8c05 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2219-fe9d8c05/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OUJ2IJGXlL +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zhb2L1Z5y1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.OUJ2IJGXlL ++ cat /tmp/tmp.Zhb2L1Z5y1 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.OUJ2IJGXlL ++ cat /tmp/tmp.Zhb2L1Z5y1 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.OUJ2IJGXlL ++ cat /tmp/tmp.Zhb2L1Z5y1 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.OUJ2IJGXlL ++ cat /tmp/tmp.Zhb2L1Z5y1 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.OUJ2IJGXlL /tmp/tmp.Zhb2L1Z5y1 ++ return 1 + [[ '' == Terminating ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep 
validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace + awk '{print$1}' ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.SzX1rO6uJl + local LAST_OUT=/tmp/tmp.gI2JLzqsj6 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.MV3OTH2QdN + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.ZwAxoXxmVg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gI2JLzqsj6 + cat /tmp/tmp.ZwAxoXxmVg + rm /tmp/tmp.gI2JLzqsj6 /tmp/tmp.ZwAxoXxmVg + return 0 namespace "demand-backup-snapshot-7009" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SzX1rO6uJl namespace "psmdb-operator" deleted + cat /tmp/tmp.MV3OTH2QdN + rm /tmp/tmp.SzX1rO6uJl /tmp/tmp.MV3OTH2QdN + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.rG5FTEnNCP ++ mktemp + local LAST_ERR=/tmp/tmp.XvnEJJOFrd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rG5FTEnNCP + cat /tmp/tmp.XvnEJJOFrd + rm /tmp/tmp.rG5FTEnNCP /tmp/tmp.XvnEJJOFrd + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.R2VrjL4Jm2 ++ mktemp + local LAST_ERR=/tmp/tmp.ILQkBYikWG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R2VrjL4Jm2 namespace/psmdb-operator created + cat /tmp/tmp.ILQkBYikWG + rm /tmp/tmp.R2VrjL4Jm2 /tmp/tmp.ILQkBYikWG + return 0 + set_kube_ctx psmdb-operator + 
local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.pZ94L1jv92 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SGIXAjimQj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pZ94L1jv92 ++ cat /tmp/tmp.SGIXAjimQj ++ rm /tmp/tmp.pZ94L1jv92 /tmp/tmp.SGIXAjimQj ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-fe9d8c05-8-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.0L8VLefI4o ++ mktemp + local LAST_ERR=/tmp/tmp.HYaG2EWzXN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-fe9d8c05-8-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0L8VLefI4o Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-fe9d8c05-8-cluster2" modified. + cat /tmp/tmp.HYaG2EWzXN + rm /tmp/tmp.0L8VLefI4o /tmp/tmp.HYaG2EWzXN + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-fe9d8c05' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-fe9d8c05 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.mLN8orRLlp ++ mktemp + local LAST_ERR=/tmp/tmp.jt0TCfiKSg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mLN8orRLlp customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.jt0TCfiKSg + rm /tmp/tmp.mLN8orRLlp /tmp/tmp.jt0TCfiKSg + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Rz57GtJPIe ++ mktemp + local LAST_ERR=/tmp/tmp.pj81Bh2FGL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Rz57GtJPIe clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.pj81Bh2FGL + rm /tmp/tmp.Rz57GtJPIe 
/tmp/tmp.pj81Bh2FGL + return 0 + yq eval $'\n\t\t\t(.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-fe9d8c05") |\n\t\t\t((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |\n\t\t\t((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.5jFoAe1KWB ++ mktemp + local LAST_ERR=/tmp/tmp.otIGAgho87 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5jFoAe1KWB deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.otIGAgho87 + rm /tmp/tmp.5jFoAe1KWB /tmp/tmp.otIGAgho87 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.VidjWrhOUI +++ mktemp ++ local LAST_ERR=/tmp/tmp.DmGOWiSYWx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VidjWrhOUI ++ cat /tmp/tmp.DmGOWiSYWx ++ rm /tmp/tmp.VidjWrhOUI /tmp/tmp.DmGOWiSYWx ++ return 0 + wait_operator_pod percona-server-mongodb-operator-5796f4f5b5-smb6j + local pod=percona-server-mongodb-operator-5796f4f5b5-smb6j + set +o xtrace waiting for pod/percona-server-mongodb-operator-5796f4f5b5-smb6j to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.AR4fWpzuv8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WizgZ2erXm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AR4fWpzuv8 ++ cat /tmp/tmp.WizgZ2erXm ++ rm /tmp/tmp.AR4fWpzuv8 /tmp/tmp.WizgZ2erXm ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-5796f4f5b5-smb6j ++ mktemp + local LAST_OUT=/tmp/tmp.Qm0O9aki3k ++ mktemp + local LAST_ERR=/tmp/tmp.MWNP1eQdZk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-5796f4f5b5-smb6j + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qm0O9aki3k + cat /tmp/tmp.MWNP1eQdZk + rm /tmp/tmp.Qm0O9aki3k /tmp/tmp.MWNP1eQdZk + return 0 2026-04-10T10:38:37.845Z INFO setup Manager starting up {"gitCommit": "fe9d8c05ac7c569882d27d07bd9604e14009ab07", "gitBranch": "PR-2219-fe9d8c05", "buildTime": "", "goVersion": "go1.25.9", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-snapshot-17683 + local namespace=demand-backup-snapshot-17683 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace 
----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + xargs kubectl delete ns + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces demand-backup-snapshot-17683' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-snapshot-17683 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-snapshot-17683 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Oz30kv1LCJ + local LAST_OUT=/tmp/tmp.9zzhRLtAII ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.ylcC6PnERV + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.CP4Cl0WtVE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-snapshot-17683 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Oz30kv1LCJ + cat /tmp/tmp.ylcC6PnERV + rm /tmp/tmp.Oz30kv1LCJ /tmp/tmp.ylcC6PnERV + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9zzhRLtAII error: resource(s) were provided, but no name was specified + cat /tmp/tmp.CP4Cl0WtVE + rm /tmp/tmp.9zzhRLtAII /tmp/tmp.CP4Cl0WtVE + return 0 + kubectl_bin wait --for=delete namespace demand-backup-snapshot-17683 ++ mktemp + local LAST_OUT=/tmp/tmp.0BQoMahNRs ++ mktemp + local LAST_ERR=/tmp/tmp.WUIDogtmdA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-snapshot-17683 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0BQoMahNRs + cat /tmp/tmp.WUIDogtmdA + rm /tmp/tmp.0BQoMahNRs /tmp/tmp.WUIDogtmdA + return 0 + desc 'create namespace 
demand-backup-snapshot-17683' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-snapshot-17683 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-snapshot-17683 ++ mktemp + local LAST_OUT=/tmp/tmp.WU1150H3gG ++ mktemp + local LAST_ERR=/tmp/tmp.L8jBlyD59m + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-snapshot-17683 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WU1150H3gG namespace/demand-backup-snapshot-17683 created + cat /tmp/tmp.L8jBlyD59m + rm /tmp/tmp.WU1150H3gG /tmp/tmp.L8jBlyD59m + return 0 + set_kube_ctx demand-backup-snapshot-17683 + local namespace=demand-backup-snapshot-17683 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.O4JBKyCOlz +++ mktemp ++ local LAST_ERR=/tmp/tmp.PZ9MUu3kxE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O4JBKyCOlz ++ cat /tmp/tmp.PZ9MUu3kxE ++ rm /tmp/tmp.O4JBKyCOlz /tmp/tmp.PZ9MUu3kxE ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-fe9d8c05-8-cluster2 --namespace=demand-backup-snapshot-17683 ++ mktemp + local LAST_OUT=/tmp/tmp.ULc9SLn7aS ++ mktemp + local LAST_ERR=/tmp/tmp.ru2gYIXAty + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-fe9d8c05-8-cluster2 --namespace=demand-backup-snapshot-17683 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ULc9SLn7aS Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-fe9d8c05-8-cluster2" modified. 
+ cat /tmp/tmp.ru2gYIXAty + rm /tmp/tmp.ULc9SLn7aS /tmp/tmp.ru2gYIXAty + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Fri Apr 10 10:39:12 2026 NAMESPACE: demand-backup-snapshot-17683 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-snapshot-17683.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-snapshot-17683 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-snapshot-17683 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. 
export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-snapshot-17683 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-snapshot-17683 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XnTPEx2lp3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PgiNIrCfgK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XnTPEx2lp3 ++ cat /tmp/tmp.PgiNIrCfgK ++ rm /tmp/tmp.XnTPEx2lp3 /tmp/tmp.PgiNIrCfgK ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-lvzfz + wait_pod minio-service-6d5f646cdc-lvzfz + local pod=minio-service-6d5f646cdc-lvzfz + set +o xtrace waiting for pod/minio-service-6d5f646cdc-lvzfz to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-snapshot-17683.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.1QEsDejATj ++ mktemp + local LAST_ERR=/tmp/tmp.wYZw0WFi8u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1QEsDejATj pod "aws-cli" deleted from demand-backup-snapshot-17683 namespace + cat /tmp/tmp.wYZw0WFi8u All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
+ rm /tmp/tmp.1QEsDejATj /tmp/tmp.wYZw0WFi8u + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.wSSO8OaJf6 ++ mktemp + local LAST_ERR=/tmp/tmp.8Nq65AWEaz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wSSO8OaJf6 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.8Nq65AWEaz + rm /tmp/tmp.wSSO8OaJf6 /tmp/tmp.8Nq65AWEaz + return 0 + cluster=some-name + desc 'Testing snapshot backup/restore on unencrypted cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing snapshot backup/restore on unencrypted cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.PbuhJHFfjN ++ mktemp + local LAST_ERR=/tmp/tmp.1bIsLPNU3J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PbuhJHFfjN secret/some-users created + cat /tmp/tmp.1bIsLPNU3J + rm /tmp/tmp.PbuhJHFfjN /tmp/tmp.1bIsLPNU3J + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-fe9d8c05"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/demand-backup-snapshot-17683/g + local LAST_OUT=/tmp/tmp.voz2K5pniR ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.xPNaRdEa9N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.voz2K5pniR perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.xPNaRdEa9N + rm /tmp/tmp.voz2K5pniR 
/tmp/tmp.xPNaRdEa9N + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Nx9f5lUt4j ++ mktemp + local LAST_ERR=/tmp/tmp.Uw0ZqY8unJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nx9f5lUt4j deployment.apps/psmdb-client created + cat /tmp/tmp.Uw0ZqY8unJ + rm /tmp/tmp.Nx9f5lUt4j /tmp/tmp.Uw0ZqY8unJ + return 0 + echo 'check if all pods started' check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cj1NTvFMah +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ud2c7zx1xK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cj1NTvFMah ++ cat /tmp/tmp.Ud2c7zx1xK ++ rm /tmp/tmp.cj1NTvFMah /tmp/tmp.Ud2c7zx1xK ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0PiGbltggE +++ mktemp ++ local LAST_ERR=/tmp/tmp.zMNrjpqWhY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0PiGbltggE ++ cat /tmp/tmp.zMNrjpqWhY ++ rm /tmp/tmp.0PiGbltggE /tmp/tmp.zMNrjpqWhY ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rle1E4q6Iy +++ mktemp ++ local LAST_ERR=/tmp/tmp.LOwrMFKJux ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rle1E4q6Iy ++ cat /tmp/tmp.LOwrMFKJux ++ rm /tmp/tmp.rle1E4q6Iy /tmp/tmp.LOwrMFKJux ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness....... 
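# ---------------------------------------------------------------------------
# NOTE (illustrative sketch, not the suite's actual code): the wait_for_running
# and wait_pod helpers traced above poll each replica-set pod until kubectl
# reports it Ready. A minimal, hypothetical re-implementation of that pattern;
# the pod names, timeout, and poll interval below are assumptions.

wait_pod_sketch() {
    local pod=$1
    echo -n "waiting for pod/${pod} to be ready"
    # kubectl wait exits non-zero until the Ready condition is met
    until kubectl wait --for=condition=Ready "pod/${pod}" --timeout=10s >/dev/null 2>&1; do
        echo -n .
        sleep 1
    done
    echo .OK
}

# Usage, mirroring the loop in the trace above:
#   for i in 0 1 2; do wait_pod_sketch "some-name-rs0-${i}"; done
# ---------------------------------------------------------------------------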
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ud4euCmiAS +++ mktemp ++ local LAST_ERR=/tmp/tmp.YQMqKkn2GO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ud4euCmiAS ++ cat /tmp/tmp.YQMqKkn2GO ++ rm /tmp/tmp.Ud4euCmiAS /tmp/tmp.YQMqKkn2GO ++ return 0 + [[ ready == ready ]] + echo .OK .OK + sleep 60 + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.OK + echo 'Writing test data' Writing test data + run_mongo_tls 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FyaqvpA8UD +++ mktemp ++ local LAST_ERR=/tmp/tmp.nm3jNNKLq2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FyaqvpA8UD ++ cat /tmp/tmp.nm3jNNKLq2 ++ rm /tmp/tmp.FyaqvpA8UD /tmp/tmp.nm3jNNKLq2 ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.KziW8abhhz ++ mktemp + local LAST_ERR=/tmp/tmp.8CruTyeJ6A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KziW8abhhz Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-10T10:43:12.398Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("be9acd0f-0e98-489d-b846-236ebf5e2b25") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.8CruTyeJ6A + rm /tmp/tmp.KziW8abhhz /tmp/tmp.8CruTyeJ6A + return 0 + sleep 1 + run_mongo_tls 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AUB4D3DVdA +++ mktemp ++ local LAST_ERR=/tmp/tmp.naduZ63ulN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AUB4D3DVdA ++ cat /tmp/tmp.naduZ63ulN ++ rm /tmp/tmp.AUB4D3DVdA /tmp/tmp.naduZ63ulN ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.4V25YKHzpb ++ mktemp + local LAST_ERR=/tmp/tmp.Afo2U6HNEZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4V25YKHzpb Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-10T10:43:16.262Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" 
: UUID("24c32c56-0080-486c-a0a0-c5fab9e870af") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Afo2U6HNEZ + rm /tmp/tmp.4V25YKHzpb /tmp/tmp.Afo2U6HNEZ + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T10:43:21+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h1WhAuvrQm +++ mktemp ++ local LAST_ERR=/tmp/tmp.cQIv9jJQL9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h1WhAuvrQm ++ cat /tmp/tmp.cQIv9jJQL9 ++ rm /tmp/tmp.h1WhAuvrQm /tmp/tmp.cQIv9jJQL9 ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.4sFQc4ECRC ++ mktemp + local LAST_ERR=/tmp/tmp.CFy07mSyYL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4sFQc4ECRC + cat /tmp/tmp.CFy07mSyYL + rm /tmp/tmp.4sFQc4ECRC /tmp/tmp.CFy07mSyYL + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json 
/tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T10:43:23+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h91uOMngh8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZIhc9fxOx6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h91uOMngh8 ++ cat /tmp/tmp.ZIhc9fxOx6 ++ rm /tmp/tmp.h91uOMngh8 /tmp/tmp.ZIhc9fxOx6 ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.2QVxG0jcqf ++ mktemp + local LAST_ERR=/tmp/tmp.rv6y7OyTat + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2QVxG0jcqf + cat /tmp/tmp.rv6y7OyTat + rm /tmp/tmp.2QVxG0jcqf /tmp/tmp.rv6y7OyTat + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local 
suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T10:43:26+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uTbUhITzaS +++ mktemp ++ local LAST_ERR=/tmp/tmp.zEtHelOqTS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uTbUhITzaS ++ cat /tmp/tmp.zEtHelOqTS ++ rm /tmp/tmp.uTbUhITzaS /tmp/tmp.zEtHelOqTS ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.VmP9HLcmPs ++ mktemp + local LAST_ERR=/tmp/tmp.saBX7loNxW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VmP9HLcmPs + cat /tmp/tmp.saBX7loNxW + rm /tmp/tmp.VmP9HLcmPs /tmp/tmp.saBX7loNxW + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + echo 'Running snapshot backup (unencrypted)' Running snapshot backup (unencrypted) + backup_name=backup-snapshot + run_snapshot_backup backup-snapshot + local backup_name=backup-snapshot + log 'running snapshot backup backup-snapshot' + set +o xtrace [2026-04-10T10:43:28+0000] running snapshot backup backup-snapshot + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-snapshot" | .spec.volumeSnapshotClass = "gke-snapshot-class"' 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/backup.yml ++ mktemp + local LAST_OUT=/tmp/tmp.hg7fJSvVxy ++ mktemp + local LAST_ERR=/tmp/tmp.iTGMKw2HOh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hg7fJSvVxy perconaservermongodbbackup.psmdb.percona.com/backup-snapshot created + cat /tmp/tmp.iTGMKw2HOh + rm /tmp/tmp.hg7fJSvVxy /tmp/tmp.iTGMKw2HOh + return 0 + wait_backup backup-snapshot + local backup_name=backup-snapshot + local target_state=ready + set +o xtrace waiting for backup-snapshot to reach ready state......................OK + echo 'Drop collection and restore from snapshot (unencrypted)' Drop collection and restore from snapshot (unencrypted) + run_mongo_tls 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ebns5eoXBp +++ mktemp ++ local LAST_ERR=/tmp/tmp.aZKIKZcH1Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ebns5eoXBp ++ cat /tmp/tmp.aZKIKZcH1Q ++ rm /tmp/tmp.ebns5eoXBp /tmp/tmp.aZKIKZcH1Q ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.TsFAZhgACW ++ mktemp + local LAST_ERR=/tmp/tmp.uw0Ths40xU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TsFAZhgACW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-10T10:44:15.367Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("73923b6d-9444-4fa5-8f29-10762bd7aaab") } Percona Server for MongoDB server 
version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.uw0Ths40xU + rm /tmp/tmp.TsFAZhgACW /tmp/tmp.uw0Ths40xU + return 0 + run_restore backup-snapshot + local backup_name=backup-snapshot + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/restore.yml + log 'running restore restore-backup-snapshot' + set +o xtrace [2026-04-10T10:44:15+0000] running restore restore-backup-snapshot + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-snapshot/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-snapshot/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.hnYIvM9mp1 ++ mktemp + local LAST_ERR=/tmp/tmp.lETAU0AAYR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hnYIvM9mp1 perconaservermongodbrestore.psmdb.percona.com/restore-backup-snapshot created + cat /tmp/tmp.lETAU0AAYR + rm /tmp/tmp.hnYIvM9mp1 /tmp/tmp.lETAU0AAYR + return 0 + run_snapshot_recovery_check backup-snapshot + local backup_name=backup-snapshot + local target_cluster=some-name + wait_restore backup-snapshot some-name ready 0 3000 + local backup_name=backup-snapshot + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-snapshot object to be created.OK Waiting psmdb-restore/restore-backup-snapshot to reach state "ready" .......OK after 6 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.metadata.annotations.percona\.com/resync-pbm}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hnaVlDgrpl +++ mktemp ++ local LAST_ERR=/tmp/tmp.fqkwJ3Ylqm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.metadata.annotations.percona\.com/resync-pbm}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hnaVlDgrpl ++ cat /tmp/tmp.fqkwJ3Ylqm ++ rm /tmp/tmp.hnaVlDgrpl /tmp/tmp.fqkwJ3Ylqm ++ return 0 + '[' true '!=' true ']' + log 'Operator triggered PBM resync: OK' + set +o xtrace [2026-04-10T10:50:40+0000] Operator triggered PBM resync: OK + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yccr8rExI1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ymvwo7QLPs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yccr8rExI1 ++ cat /tmp/tmp.ymvwo7QLPs ++ rm /tmp/tmp.yccr8rExI1 /tmp/tmp.ymvwo7QLPs ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . 
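The dotted wait that continues below is produced by a retry loop polling the custom resource status. A minimal sketch of the pattern, reconstructed from the sleep/retry values visible in this trace; the real wait_cluster_consistency helper additionally wraps kubectl in the kubectl_bin retry wrapper seen throughout this log:

# poll .status.state of the PSMDB object until it reports "ready",
# printing one dot per attempt, capped at 32 polls 10s apart
wait_cluster_consistency() {
    local cluster_name=$1
    local retry=0
    sleep 7
    echo -n 'waiting for cluster readyness'
    until [[ $(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
        let retry+=1
        [ "$retry" -ge 32 ] && { echo "cluster $cluster_name never became ready" >&2; return 1; }
        echo -n .
        sleep 10
    done
    echo .OK
}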
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QsWs8CNlO1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QZHppRUdL1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QsWs8CNlO1 ++ cat /tmp/tmp.QZHppRUdL1 ++ rm /tmp/tmp.QsWs8CNlO1 /tmp/tmp.QZHppRUdL1 ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CDaOTzxcBm +++ mktemp ++ local LAST_ERR=/tmp/tmp.CMfqkO3haj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CDaOTzxcBm ++ cat /tmp/tmp.CMfqkO3haj ++ rm /tmp/tmp.CDaOTzxcBm /tmp/tmp.CMfqkO3haj ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XnIcJZ0Z0y +++ mktemp ++ local LAST_ERR=/tmp/tmp.XGR4mT52Vc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XnIcJZ0Z0y ++ cat /tmp/tmp.XGR4mT52Vc ++ rm /tmp/tmp.XnIcJZ0Z0y /tmp/tmp.XGR4mT52Vc ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AqCDSpMqOP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lgd9cBaRzo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AqCDSpMqOP ++ cat /tmp/tmp.Lgd9cBaRzo ++ rm /tmp/tmp.AqCDSpMqOP /tmp/tmp.Lgd9cBaRzo ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
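Worth noting in the poll results above: immediately after a snapshot restore the cluster reports error for the first few attempts (most likely while the pods come back up on the restored volumes), then initializing while the replica set reconfigures, and only then ready, as seen just below. The loop deliberately treats error as transient and keeps retrying, so the restore is judged solely by the terminal state within the 32-poll budget.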
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dxjbaKG8Gy +++ mktemp ++ local LAST_ERR=/tmp/tmp.z7rfqEgg6G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dxjbaKG8Gy ++ cat /tmp/tmp.z7rfqEgg6G ++ rm /tmp/tmp.dxjbaKG8Gy /tmp/tmp.z7rfqEgg6G ++ return 0 + [[ ready == ready ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.OK + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T10:51:45+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yMkvWAdcIy +++ mktemp ++ local LAST_ERR=/tmp/tmp.sRbVjWwvR0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yMkvWAdcIy ++ cat /tmp/tmp.sRbVjWwvR0 ++ rm /tmp/tmp.yMkvWAdcIy /tmp/tmp.sRbVjWwvR0 ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.oH0J5eFqE0 ++ mktemp + local LAST_ERR=/tmp/tmp.ei2EaO3Z5D + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt 
--tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oH0J5eFqE0 + cat /tmp/tmp.ei2EaO3Z5D + rm /tmp/tmp.oH0J5eFqE0 /tmp/tmp.ei2EaO3Z5D + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T10:51:48+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6MGrEWfUwr +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fw5iwEfeu3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6MGrEWfUwr ++ cat /tmp/tmp.Fw5iwEfeu3 ++ rm /tmp/tmp.6MGrEWfUwr /tmp/tmp.Fw5iwEfeu3 ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.D2WYPAtleB ++ mktemp + local LAST_ERR=/tmp/tmp.uHqH6rQY9z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D2WYPAtleB + cat /tmp/tmp.uHqH6rQY9z + rm /tmp/tmp.D2WYPAtleB /tmp/tmp.uHqH6rQY9z + return 0 + [[ 0 -eq 0 ]] + diff -u 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T10:51:51+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jH4hg4fVXe +++ mktemp ++ local LAST_ERR=/tmp/tmp.eDMF2uXqB2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jH4hg4fVXe ++ cat /tmp/tmp.eDMF2uXqB2 ++ rm /tmp/tmp.jH4hg4fVXe /tmp/tmp.eDMF2uXqB2 ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.NCW9E1qvWz ++ mktemp + local LAST_ERR=/tmp/tmp.k90AGX3SNY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NCW9E1qvWz + cat /tmp/tmp.k90AGX3SNY + rm /tmp/tmp.NCW9E1qvWz /tmp/tmp.k90AGX3SNY + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + log 'Data restored: OK' + set +o xtrace [2026-04-10T10:51:54+0000] Data restored: OK + desc 'Testing cross-cluster snapshot restore to a new 
cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing cross-cluster snapshot restore to a new cluster ----------------------------------------------------------------------------------- + new_cluster=some-name-new + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name-new.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name-new.yml ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name-new.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + local LAST_OUT=/tmp/tmp.etc3ixVtFN + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-fe9d8c05"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/demand-backup-snapshot-17683/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.KgAS8XW7fA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.etc3ixVtFN perconaservermongodb.psmdb.percona.com/some-name-new created + cat /tmp/tmp.KgAS8XW7fA + rm /tmp/tmp.etc3ixVtFN /tmp/tmp.KgAS8XW7fA + return 0 + echo 'check if all pods started on new cluster' check if all pods started on new cluster + wait_for_running some-name-new-rs0 3 + local name=some-name-new-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name-new ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-new-rs0-0 + local pod=some-name-new-rs0-0 + set +o xtrace waiting for pod/some-name-new-rs0-0 to be ready.........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-new-rs0-1 + local pod=some-name-new-rs0-1 + set +o xtrace waiting for pod/some-name-new-rs0-1 to be ready....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KFSa6wUiXy +++ mktemp ++ local LAST_ERR=/tmp/tmp.YIsLy9EiuG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KFSa6wUiXy ++ cat /tmp/tmp.YIsLy9EiuG ++ rm /tmp/tmp.KFSa6wUiXy /tmp/tmp.YIsLy9EiuG ++ return 0 + [[ '' == true ]] + wait_pod some-name-new-rs0-2 + local pod=some-name-new-rs0-2 + set +o xtrace waiting for pod/some-name-new-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5DZfYeDdFZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.UkZuJ65L6U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set 
-e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5DZfYeDdFZ ++ cat /tmp/tmp.UkZuJ65L6U ++ rm /tmp/tmp.5DZfYeDdFZ /tmp/tmp.UkZuJ65L6U ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MRt7mlJVbQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.moI6I3CvUr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MRt7mlJVbQ ++ cat /tmp/tmp.moI6I3CvUr ++ rm /tmp/tmp.MRt7mlJVbQ /tmp/tmp.moI6I3CvUr ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness... + wait_cluster_consistency some-name-new + local cluster_name=some-name-new + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9IhFombsu1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yO7h1mldQs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9IhFombsu1 ++ cat /tmp/tmp.yO7h1mldQs ++ rm /tmp/tmp.9IhFombsu1 /tmp/tmp.yO7h1mldQs ++ return 0 + [[ ready == ready ]] + echo .OK .OK + sleep 60 + wait_for_pbm_operations some-name-new + local cluster=some-name-new + set +o xtrace waiting for PBM operation to finish.OK + echo 'Restoring snapshot to new cluster using backupSource' Restoring snapshot to new cluster using backupSource + run_snapshot_restore_backupsource backup-snapshot some-name-new + local backup_name=backup-snapshot + local target_cluster=some-name-new + local snapshots ++ kubectl_bin get psmdb-backup backup-snapshot -o 'jsonpath={.status.snapshots}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1JW9VTBFn2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.a5IfY3s8Q9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-snapshot -o 'jsonpath={.status.snapshots}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1JW9VTBFn2 ++ cat /tmp/tmp.a5IfY3s8Q9 ++ rm /tmp/tmp.1JW9VTBFn2 /tmp/tmp.a5IfY3s8Q9 ++ return 0 + snapshots='[{"replsetName":"rs0","snapshotName":"backup-snapshot-rs0"}]' + log 'running cross-cluster snapshot restore to some-name-new from backup backup-snapshot' + set +o xtrace [2026-04-10T10:54:33+0000] running cross-cluster snapshot restore to some-name-new from backup backup-snapshot + backup_name=backup-snapshot + target_cluster=some-name-new + snapshots='[{"replsetName":"rs0","snapshotName":"backup-snapshot-rs0"}]' + yq eval $'\n\t\t\t.metadata.name = "restore-backupsource-" + strenv(backup_name) |\n\t\t\t.spec.clusterName = strenv(target_cluster) |\n\t\t\t.spec.backupSource.snapshots = env(snapshots)\n\t\t' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/restore-backupsource.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rgfxKwvmSD ++ mktemp + local LAST_ERR=/tmp/tmp.bP53DiMB5O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rgfxKwvmSD 
perconaservermongodbrestore.psmdb.percona.com/restore-backupsource-backup-snapshot created + cat /tmp/tmp.bP53DiMB5O + rm /tmp/tmp.rgfxKwvmSD /tmp/tmp.bP53DiMB5O + return 0 + run_snapshot_recovery_check backupsource-backup-snapshot some-name-new + local backup_name=backupsource-backup-snapshot + local target_cluster=some-name-new + wait_restore backupsource-backup-snapshot some-name-new ready 0 3000 + local backup_name=backupsource-backup-snapshot + local cluster_name=some-name-new + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backupsource-backup-snapshot object to be created.OK Waiting psmdb-restore/restore-backupsource-backup-snapshot to reach state "ready" .......OK after 6 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.metadata.annotations.percona\.com/resync-pbm}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.khVDaQGba9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MP2QxbNioW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.metadata.annotations.percona\.com/resync-pbm}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.khVDaQGba9 ++ cat /tmp/tmp.MP2QxbNioW ++ rm /tmp/tmp.khVDaQGba9 /tmp/tmp.MP2QxbNioW ++ return 0 + '[' true '!=' true ']' + log 'Operator triggered PBM resync: OK' + set +o xtrace [2026-04-10T11:00:53+0000] Operator triggered PBM resync: OK + wait_cluster_consistency some-name-new + local cluster_name=some-name-new + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sLxzCY8XfJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.R4iIh9jNGs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sLxzCY8XfJ ++ cat /tmp/tmp.R4iIh9jNGs ++ rm /tmp/tmp.sLxzCY8XfJ /tmp/tmp.R4iIh9jNGs ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SU2qpGY3LM +++ mktemp ++ local LAST_ERR=/tmp/tmp.sXH0tn30Fu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SU2qpGY3LM ++ cat /tmp/tmp.sXH0tn30Fu ++ rm /tmp/tmp.SU2qpGY3LM /tmp/tmp.sXH0tn30Fu ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NKHzyLtFSV +++ mktemp ++ local LAST_ERR=/tmp/tmp.gjpQX5Y2uu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NKHzyLtFSV ++ cat /tmp/tmp.gjpQX5Y2uu ++ rm /tmp/tmp.NKHzyLtFSV /tmp/tmp.gjpQX5Y2uu ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . 
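For reference, the restore object applied above differs from the in-place restore earlier in this log: instead of spec.backupName it carries the snapshot list inline under spec.backupSource, copied from .status.snapshots of the finished backup, which is what lets the restore target a cluster other than the one that produced the backup. A sketch of what the yq pipeline produces (field layout taken from the trace; the apiVersion is assumed to be the operator's psmdb.percona.com/v1):

# fetch the snapshot list recorded on the completed backup ...
snapshots=$(kubectl get psmdb-backup backup-snapshot -o 'jsonpath={.status.snapshots}')
# ... and feed it to a restore that targets the new cluster by name
kubectl apply -f - <<EOF
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backupsource-backup-snapshot
spec:
  clusterName: some-name-new
  backupSource:
    snapshots: ${snapshots}
EOF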
.+ sleep 10 ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1mDlQt5RWv +++ mktemp ++ local LAST_ERR=/tmp/tmp.9X8z4DyRds ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1mDlQt5RWv ++ cat /tmp/tmp.9X8z4DyRds ++ rm /tmp/tmp.1mDlQt5RWv /tmp/tmp.9X8z4DyRds ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Iwjeq1jgPe +++ mktemp ++ local LAST_ERR=/tmp/tmp.deK9jIYdu3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Iwjeq1jgPe ++ cat /tmp/tmp.deK9jIYdu3 ++ rm /tmp/tmp.Iwjeq1jgPe /tmp/tmp.deK9jIYdu3 ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IO71vFxKUG +++ mktemp ++ local LAST_ERR=/tmp/tmp.DvCGhdBG1D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IO71vFxKUG ++ cat /tmp/tmp.DvCGhdBG1D ++ rm /tmp/tmp.IO71vFxKUG /tmp/tmp.DvCGhdBG1D ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.59Xhs8EAUy +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qkn9G7CkZp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.59Xhs8EAUy ++ cat /tmp/tmp.Qkn9G7CkZp ++ rm /tmp/tmp.59Xhs8EAUy /tmp/tmp.Qkn9G7CkZp ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
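Once the loop reports ready just below, the data is verified on every member of the new replica set. Each check uses the same shape seen throughout this log: pipe the query into mongo over TLS inside the client pod, strip nondeterministic output, and diff against a golden file. A condensed sketch (the /tmp/find.out name is illustrative and the grep filter list is abbreviated from the full one in the trace):

# query one member over TLS from the client pod, normalize the output,
# and compare it with the expected result
uri=myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683
kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c \
  "printf 'use myApp\n db.test.find()\n' | mongo \
   'mongodb://${uri}.svc.cluster.local:27017/admin?replicaSet=rs0' \
   --tls --tlsCAFile /etc/mongodb-ssl/ca.crt \
   --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames" |
  grep -E -v 'NETWORK|connecting to:|Implicit session:|versions do not match' |
  sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' >/tmp/find.out
diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/find.out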
.+ sleep 10 ++ kubectl_bin get psmdb some-name-new -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YBxUFkjwvT +++ mktemp ++ local LAST_ERR=/tmp/tmp.XkJqQHeHdL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-new -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YBxUFkjwvT ++ cat /tmp/tmp.XkJqQHeHdL ++ rm /tmp/tmp.YBxUFkjwvT /tmp/tmp.XkJqQHeHdL ++ return 0 + [[ ready == ready ]] + echo .OK .OK + wait_for_pbm_operations some-name-new + local cluster=some-name-new + set +o xtrace waiting for PBM operation to finish.OK + compare_mongo_cmd find myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:02:20+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gxGszhvwJs +++ mktemp ++ local LAST_ERR=/tmp/tmp.vSRiDVVVas ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gxGszhvwJs ++ cat /tmp/tmp.vSRiDVVVas ++ rm /tmp/tmp.gxGszhvwJs /tmp/tmp.vSRiDVVVas ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.Bl19XOOb0r ++ mktemp + local LAST_ERR=/tmp/tmp.ZuLYHBAuNP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-new-rs0-0.some-name-new-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bl19XOOb0r + cat /tmp/tmp.ZuLYHBAuNP + rm /tmp/tmp.Bl19XOOb0r /tmp/tmp.ZuLYHBAuNP + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-new-rs0-1.some-name-new-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-new-rs0-1.some-name-new-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:02:23+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-new-rs0-1.some-name-new-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-new-rs0-1.some-name-new-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2H9yAnnqbm +++ mktemp ++ local LAST_ERR=/tmp/tmp.otVfMFhl81 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2H9yAnnqbm ++ cat /tmp/tmp.otVfMFhl81 ++ rm /tmp/tmp.2H9yAnnqbm /tmp/tmp.otVfMFhl81 ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-new-rs0-1.some-name-new-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-new-rs0-1.some-name-new-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.s8c8si0anL ++ mktemp + local LAST_ERR=/tmp/tmp.pp8ThRn67o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-new-rs0-1.some-name-new-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem 
--tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s8c8si0anL + cat /tmp/tmp.pp8ThRn67o + rm /tmp/tmp.s8c8si0anL /tmp/tmp.pp8ThRn67o + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-new-rs0-2.some-name-new-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-new-rs0-2.some-name-new-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:02:25+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-new-rs0-2.some-name-new-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-new-rs0-2.some-name-new-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Cl2DO1Tka +++ mktemp ++ local LAST_ERR=/tmp/tmp.EsriDYg31K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0Cl2DO1Tka ++ cat /tmp/tmp.EsriDYg31K ++ rm /tmp/tmp.0Cl2DO1Tka /tmp/tmp.EsriDYg31K ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-new-rs0-2.some-name-new-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-new-rs0-2.some-name-new-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.6JwrASV0cd ++ mktemp + local LAST_ERR=/tmp/tmp.wjDIu2BtcD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-new-rs0-2.some-name-new-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6JwrASV0cd + cat /tmp/tmp.wjDIu2BtcD + rm /tmp/tmp.6JwrASV0cd /tmp/tmp.wjDIu2BtcD + return 0 + [[ 0 -eq 0 ]] + diff 
-u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + log 'Data restored: OK' + set +o xtrace [2026-04-10T11:02:28+0000] Data restored: OK + echo 'Cleaning up new cluster' Cleaning up new cluster + kubectl_bin delete psmdb some-name-new ++ mktemp + local LAST_OUT=/tmp/tmp.8ys29hk2wA ++ mktemp + local LAST_ERR=/tmp/tmp.zblqp7WqRm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb some-name-new + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ys29hk2wA perconaservermongodb.psmdb.percona.com "some-name-new" deleted from demand-backup-snapshot-17683 namespace + cat /tmp/tmp.zblqp7WqRm + rm /tmp/tmp.8ys29hk2wA /tmp/tmp.zblqp7WqRm + return 0 + kubectl_bin wait --for=delete psmdb/some-name-new --timeout=600s ++ mktemp + local LAST_OUT=/tmp/tmp.mqHM6Ar0kf ++ mktemp + local LAST_ERR=/tmp/tmp.AesNLV1OuB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete psmdb/some-name-new --timeout=600s + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mqHM6Ar0kf + cat /tmp/tmp.AesNLV1OuB + rm /tmp/tmp.mqHM6Ar0kf /tmp/tmp.AesNLV1OuB + return 0 + desc 'Testing snapshot backup/restore on encrypted cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing snapshot backup/restore on encrypted cluster ----------------------------------------------------------------------------------- + kubectl_bin delete psmdb some-name ++ mktemp + local LAST_OUT=/tmp/tmp.n4FfkxH0vv ++ mktemp + local LAST_ERR=/tmp/tmp.0F1hzNqToj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb some-name + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n4FfkxH0vv perconaservermongodb.psmdb.percona.com "some-name" deleted from demand-backup-snapshot-17683 namespace + cat /tmp/tmp.0F1hzNqToj + rm /tmp/tmp.n4FfkxH0vv /tmp/tmp.0F1hzNqToj + return 0 + kubectl_bin wait --for=delete psmdb/some-name --timeout=600s ++ mktemp + local LAST_OUT=/tmp/tmp.kNn0z6YQ82 ++ mktemp + local LAST_ERR=/tmp/tmp.bDpshwcAMX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete psmdb/some-name --timeout=600s + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kNn0z6YQ82 + cat /tmp/tmp.bDpshwcAMX + rm /tmp/tmp.kNn0z6YQ82 /tmp/tmp.bDpshwcAMX + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.vrrjnO8nnP ++ mktemp + local LAST_ERR=/tmp/tmp.NfxV0oycqy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vrrjnO8nnP secret/some-users configured + cat /tmp/tmp.NfxV0oycqy Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
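Between scenarios the suite tears clusters down synchronously before reusing the namespace: because the operator's finalizers run on deletion, the delete is always paired with a wait, as done twice above for some-name-new and some-name. The pattern (timeout value from the trace):

# remove the custom resource and block until it is fully gone,
# so the next scenario starts from a clean state
kubectl delete psmdb some-name
kubectl wait --for=delete psmdb/some-name --timeout=600s

The last-applied-configuration warning on the secrets apply just above is expected and harmless here: the secret was not originally created with kubectl apply, and kubectl patches the missing annotation automatically, exactly as the message says.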
+ rm /tmp/tmp.vrrjnO8nnP /tmp/tmp.NfxV0oycqy + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name-encrypted.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name-encrypted.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/some-name-encrypted.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-fe9d8c05"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/demand-backup-snapshot-17683/g + local LAST_OUT=/tmp/tmp.8zTJfRBpNa + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.Gyw6OsxQkc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8zTJfRBpNa perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.Gyw6OsxQkc + rm /tmp/tmp.8zTJfRBpNa /tmp/tmp.Gyw6OsxQkc + return 0 + echo 'check if all pods started' check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5lRKBrOTGD +++ mktemp ++ local LAST_ERR=/tmp/tmp.WPqDk9W4yr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5lRKBrOTGD ++ cat /tmp/tmp.WPqDk9W4yr ++ rm /tmp/tmp.5lRKBrOTGD /tmp/tmp.WPqDk9W4yr ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1Kxv2sk9S4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nJMo2pCclT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1Kxv2sk9S4 ++ cat /tmp/tmp.nJMo2pCclT ++ rm /tmp/tmp.1Kxv2sk9S4 /tmp/tmp.nJMo2pCclT ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.jB5r5v64hm +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kg5704ANK7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jB5r5v64hm ++ cat /tmp/tmp.Kg5704ANK7 ++ rm /tmp/tmp.jB5r5v64hm /tmp/tmp.Kg5704ANK7 ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness...................................... + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o2sFcq7JUx +++ mktemp ++ local LAST_ERR=/tmp/tmp.2LzJULfodC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o2sFcq7JUx ++ cat /tmp/tmp.2LzJULfodC ++ rm /tmp/tmp.o2sFcq7JUx /tmp/tmp.2LzJULfodC ++ return 0 + [[ ready == ready ]] + echo .OK .OK + sleep 60 + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.OK + echo 'Creating app user and writing test data (encrypted)' Creating app user and writing test data (encrypted) + run_mongo_tls 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IwdAUlTFKs +++ mktemp ++ local LAST_ERR=/tmp/tmp.bYjO3CPhWi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IwdAUlTFKs ++ cat /tmp/tmp.bYjO3CPhWi ++ rm /tmp/tmp.IwdAUlTFKs /tmp/tmp.bYjO3CPhWi ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.hQnA90cHh3 ++ mktemp + local LAST_ERR=/tmp/tmp.Lt6h7qCmls + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo 
mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hQnA90cHh3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-10T11:05:26.147Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("bb5f5fa9-d103-4d09-ae11-af2988c1f399") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.Lt6h7qCmls + rm /tmp/tmp.hQnA90cHh3 /tmp/tmp.Lt6h7qCmls + return 0 + sleep 1 + run_mongo_tls 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.soGzl74nZ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.m4YAJgVlFY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.soGzl74nZ4 ++ cat /tmp/tmp.m4YAJgVlFY ++ rm /tmp/tmp.soGzl74nZ4 /tmp/tmp.m4YAJgVlFY ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.dkBXpNH8yI ++ mktemp + local LAST_ERR=/tmp/tmp.RrrQgQ0PMR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dkBXpNH8yI Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-10T11:05:29.320Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("31075109-6d72-453c-aeb5-1429349c6b85") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RrrQgQ0PMR + rm /tmp/tmp.dkBXpNH8yI /tmp/tmp.RrrQgQ0PMR + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:05:34+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2NHj5wwZ1W +++ mktemp ++ local LAST_ERR=/tmp/tmp.vFe4U0DvIl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2NHj5wwZ1W ++ cat /tmp/tmp.vFe4U0DvIl ++ rm /tmp/tmp.2NHj5wwZ1W /tmp/tmp.vFe4U0DvIl ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.iwJkVcjyTf ++ mktemp + local LAST_ERR=/tmp/tmp.yF2AGc7aFn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
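
The user creation and the insert above both run from inside the long-lived psmdb-client pod: the command is printf-piped into mongo with the cluster CA and a client certificate already present in the container. A sketch of the invocation pattern the trace implies (argument handling is simplified; the cfg-replica-set branch and port parsing are omitted):

    run_mongo_tls() {
        local command=$1
        local uri=$2
        local driver=${3:-mongodb+srv}
        local suffix=.svc.cluster.local
        local client_container
        # the tests keep one client pod around and exec into it
        client_container=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$client_container" -- bash -c \
            "printf '$command\n' | mongo $driver://$uri$suffix:27017/admin?replicaSet=rs0 \
            --tls --tlsCAFile /etc/mongodb-ssl/ca.crt \
            --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames"
    }

    # e.g., the write seen in this run:
    run_mongo_tls 'use myApp\n db.test.insert({ x: 100500 })' \
        myApp:myPass@some-name-rs0.demand-backup-snapshot-17683
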
mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iwJkVcjyTf + cat /tmp/tmp.yF2AGc7aFn + rm /tmp/tmp.iwJkVcjyTf /tmp/tmp.yF2AGc7aFn + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:05:37+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.R8RhBAH8RH +++ mktemp ++ local LAST_ERR=/tmp/tmp.W60lPane6A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.R8RhBAH8RH ++ cat /tmp/tmp.W60lPane6A ++ rm /tmp/tmp.R8RhBAH8RH /tmp/tmp.W60lPane6A ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.2rhfWk9jn8 ++ mktemp + local LAST_ERR=/tmp/tmp.j8gullj1D1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 
-a -n 1 ']' + break + cat /tmp/tmp.2rhfWk9jn8 + cat /tmp/tmp.j8gullj1D1 + rm /tmp/tmp.2rhfWk9jn8 /tmp/tmp.j8gullj1D1 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:05:39+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3YF6I6kOiS +++ mktemp ++ local LAST_ERR=/tmp/tmp.MMepgnXTpm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3YF6I6kOiS ++ cat /tmp/tmp.MMepgnXTpm ++ rm /tmp/tmp.3YF6I6kOiS /tmp/tmp.MMepgnXTpm ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.jizeZS0YIK ++ mktemp + local LAST_ERR=/tmp/tmp.YJqNRDwiEN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jizeZS0YIK + cat /tmp/tmp.YJqNRDwiEN + rm /tmp/tmp.jizeZS0YIK /tmp/tmp.YJqNRDwiEN + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json 
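
compare_mongo_cmd, which is run once per member (rs0-0, rs0-1, rs0-2), makes the shell output diffable: a grep -E -v filter drops connection banners and network-log noise, and sed masks ObjectIds and the per-run namespace suffix before diffing against the stored find.json. A condensed version of the pipeline visible in the trace ($tmp_dir and $test_dir stand in for the script's real paths):

    run_mongo_tls 'use myApp\n db.test.find()' "$uri" mongodb '' '' \
        | grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match|Started a new thread for the timer service' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        >"$tmp_dir/find"
    diff -u "$test_dir/compare/find.json" "$tmp_dir/find"

Masking the namespace number (-17683 becomes -xxx) is what lets one golden find.json serve every run of this randomly numbered test namespace.
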
/tmp/tmp.BOuuvUDXZF/find + echo 'Running snapshot backup (encrypted)' Running snapshot backup (encrypted) + backup_name_enc=backup-snapshot-encrypted + run_snapshot_backup backup-snapshot-encrypted + local backup_name=backup-snapshot-encrypted + log 'running snapshot backup backup-snapshot-encrypted' + set +o xtrace [2026-04-10T11:05:42+0000] running snapshot backup backup-snapshot-encrypted + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-snapshot-encrypted" | .spec.volumeSnapshotClass = "gke-snapshot-class"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/backup.yml ++ mktemp + local LAST_OUT=/tmp/tmp.H5VJCa3j0f ++ mktemp + local LAST_ERR=/tmp/tmp.L1EYb68EGc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H5VJCa3j0f perconaservermongodbbackup.psmdb.percona.com/backup-snapshot-encrypted created + cat /tmp/tmp.L1EYb68EGc + rm /tmp/tmp.H5VJCa3j0f /tmp/tmp.L1EYb68EGc + return 0 + wait_backup backup-snapshot-encrypted + local backup_name=backup-snapshot-encrypted + local target_state=ready + set +o xtrace waiting for backup-snapshot-encrypted to reach ready state........................OK + echo 'Drop collection and restore from snapshot (encrypted)' Drop collection and restore from snapshot (encrypted) + run_mongo_tls 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yRQsynCAhn +++ mktemp ++ local LAST_ERR=/tmp/tmp.xg2evTKsWG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yRQsynCAhn ++ cat /tmp/tmp.xg2evTKsWG ++ rm /tmp/tmp.yRQsynCAhn /tmp/tmp.xg2evTKsWG ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.kH5b3I5jn3 ++ mktemp + local LAST_ERR=/tmp/tmp.PsuYrmiRGI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kH5b3I5jn3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
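
run_snapshot_backup templates the backup CR in memory: yq rewrites .metadata.name and injects the VolumeSnapshotClass detected at startup, then pipes the result into kubectl apply. The following reproduces that step; the manifest body is a guess at the shape of conf/backup.yml, since only the two fields touched by yq are visible in the trace:

    # hypothetical conf/backup.yml; only metadata.name and
    # spec.volumeSnapshotClass are confirmed by the trace
    cat <<'EOF' >backup.yml
    apiVersion: psmdb.percona.com/v1
    kind: PerconaServerMongoDBBackup
    metadata:
      name: backup-snapshot
    spec:
      clusterName: some-name
    EOF
    yq eval '.metadata.name = "backup-snapshot-encrypted" |
        .spec.volumeSnapshotClass = "gke-snapshot-class"' backup.yml \
        | kubectl apply -f -
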
mongodb://some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-10T11:06:34.011Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("a8ccf0cb-8fa8-4119-a07b-6e6df2e29f8f") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.PsuYrmiRGI + rm /tmp/tmp.kH5b3I5jn3 /tmp/tmp.PsuYrmiRGI + return 0 + run_restore backup-snapshot-encrypted + local backup_name=backup-snapshot-encrypted + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/restore.yml + log 'running restore restore-backup-snapshot-encrypted' + set +o xtrace [2026-04-10T11:06:34+0000] running restore restore-backup-snapshot-encrypted + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-snapshot-encrypted/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-snapshot-encrypted/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.mGZL5PWn08 ++ mktemp + local LAST_ERR=/tmp/tmp.YB4OFQaGXs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mGZL5PWn08 perconaservermongodbrestore.psmdb.percona.com/restore-backup-snapshot-encrypted created + cat /tmp/tmp.YB4OFQaGXs + rm /tmp/tmp.mGZL5PWn08 /tmp/tmp.YB4OFQaGXs + return 0 + run_snapshot_recovery_check backup-snapshot-encrypted + local backup_name=backup-snapshot-encrypted + local target_cluster=some-name + wait_restore backup-snapshot-encrypted some-name ready 0 3000 + local backup_name=backup-snapshot-encrypted + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-snapshot-encrypted object to be created.OK Waiting psmdb-restore/restore-backup-snapshot-encrypted to reach state "ready" .......OK after 6 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.metadata.annotations.percona\.com/resync-pbm}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SOa5q9SMt4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0Zy7WPRfOL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.metadata.annotations.percona\.com/resync-pbm}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SOa5q9SMt4 ++ cat /tmp/tmp.0Zy7WPRfOL ++ rm /tmp/tmp.SOa5q9SMt4 /tmp/tmp.0Zy7WPRfOL ++ return 0 + '[' true '!=' true ']' + log 'Operator triggered PBM resync: OK' + set +o xtrace [2026-04-10T11:12:54+0000] Operator triggered PBM resync: OK + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.baczhyS2jG +++ mktemp ++ local LAST_ERR=/tmp/tmp.avRsNTBPQ1 ++ local exit_status=0 ++ local 
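
run_restore does the same templating with sed instead of yq: it stamps the restore name and backupName into conf/restore.yml and applies the result, then wait_restore polls the object, here with a 3000-second budget. A sketch (the kubectl wait line approximates the script's polling loop; sed's case sensitivity keeps 's/name:/.../' from also matching 'backupName:'):

    cat "$test_dir/conf/restore.yml" \
        | sed -e 's/name:/name: restore-backup-snapshot-encrypted/' \
        | sed -e 's/backupName:/backupName: backup-snapshot-encrypted/' \
        | kubectl apply -f -
    kubectl wait --for=jsonpath='{.status.state}'=ready \
        psmdb-restore/restore-backup-snapshot-encrypted --timeout=3000s
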
timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.baczhyS2jG ++ cat /tmp/tmp.avRsNTBPQ1 ++ rm /tmp/tmp.baczhyS2jG /tmp/tmp.avRsNTBPQ1 ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n1GcVxbmrx +++ mktemp ++ local LAST_ERR=/tmp/tmp.mwgF1G5Itz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n1GcVxbmrx ++ cat /tmp/tmp.mwgF1G5Itz ++ rm /tmp/tmp.n1GcVxbmrx /tmp/tmp.mwgF1G5Itz ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xgg9rY75NI +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZkcG3aumzf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xgg9rY75NI ++ cat /tmp/tmp.ZkcG3aumzf ++ rm /tmp/tmp.Xgg9rY75NI /tmp/tmp.ZkcG3aumzf ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yXEmErTXut +++ mktemp ++ local LAST_ERR=/tmp/tmp.QhdZ6boB4z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yXEmErTXut ++ cat /tmp/tmp.QhdZ6boB4z ++ rm /tmp/tmp.yXEmErTXut /tmp/tmp.QhdZ6boB4z ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9PsLIGsw4V +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mm0jMowMzU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9PsLIGsw4V ++ cat /tmp/tmp.Mm0jMowMzU ++ rm /tmp/tmp.9PsLIGsw4V /tmp/tmp.Mm0jMowMzU ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lAVI88ZTwn +++ mktemp ++ local LAST_ERR=/tmp/tmp.dqdGgrJl5x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lAVI88ZTwn ++ cat /tmp/tmp.dqdGgrJl5x ++ rm /tmp/tmp.lAVI88ZTwn /tmp/tmp.dqdGgrJl5x ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . 
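
The readiness wait running here is a plain polling loop over the custom resource: read .status.state every 10 seconds and give up after 32 attempts. In this run the state reported error for five polls and initializing for two before flipping to ready, which is why the restore check below still passes. A sketch consistent with the trace (the real script routes the get through kubectl_bin):

    wait_cluster_consistency() {
        local cluster_name=$1 wait_time=32 retry=0
        sleep 7
        echo -n 'waiting for cluster readyness'
        until [[ $(kubectl get psmdb "$cluster_name" \
                -o 'jsonpath={.status.state}') == ready ]]; do
            let retry+=1
            if [ "$retry" -ge "$wait_time" ]; then
                echo "cluster $cluster_name did not become ready"
                return 1
            fi
            echo -n .
            sleep 10
        done
        echo .OK
    }
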
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z16CSsBJqx +++ mktemp ++ local LAST_ERR=/tmp/tmp.1APirinR1g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z16CSsBJqx ++ cat /tmp/tmp.1APirinR1g ++ rm /tmp/tmp.z16CSsBJqx /tmp/tmp.1APirinR1g ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mWbjuhPnVa +++ mktemp ++ local LAST_ERR=/tmp/tmp.t3UDXQV7dn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mWbjuhPnVa ++ cat /tmp/tmp.t3UDXQV7dn ++ rm /tmp/tmp.mWbjuhPnVa /tmp/tmp.t3UDXQV7dn ++ return 0 + [[ ready == ready ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.OK + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:14:22+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pYUzO6m67t +++ mktemp ++ local LAST_ERR=/tmp/tmp.pU93S1D3rk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pYUzO6m67t ++ cat /tmp/tmp.pU93S1D3rk ++ rm /tmp/tmp.pYUzO6m67t /tmp/tmp.pU93S1D3rk ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.1vUXr6P2JS ++ mktemp + local LAST_ERR=/tmp/tmp.Upbb5V4VmW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1vUXr6P2JS + cat /tmp/tmp.Upbb5V4VmW + rm /tmp/tmp.1vUXr6P2JS /tmp/tmp.Upbb5V4VmW + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:14:24+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NeNq7jlyvv +++ mktemp ++ local LAST_ERR=/tmp/tmp.LRT6O6qEzh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NeNq7jlyvv ++ cat /tmp/tmp.LRT6O6qEzh ++ rm /tmp/tmp.NeNq7jlyvv /tmp/tmp.LRT6O6qEzh ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local 
LAST_OUT=/tmp/tmp.urS9WLcPSn ++ mktemp + local LAST_ERR=/tmp/tmp.PB5P5J8z17 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.urS9WLcPSn + cat /tmp/tmp.PB5P5J8z17 + rm /tmp/tmp.urS9WLcPSn /tmp/tmp.PB5P5J8z17 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-10T11:14:27+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 mongodb '' '' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oW1Hfy0nPm +++ mktemp ++ local LAST_ERR=/tmp/tmp.GlWdX78hJz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oW1Hfy0nPm ++ cat /tmp/tmp.GlWdX78hJz ++ rm /tmp/tmp.oW1Hfy0nPm /tmp/tmp.GlWdX78hJz ++ return 0 + local client_container=psmdb-client-5649fbb65f-ghfrm + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.7CpiKZdNWH ++ mktemp + local LAST_ERR=/tmp/tmp.VykHWLP3sU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-ghfrm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-17683.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7CpiKZdNWH + cat /tmp/tmp.VykHWLP3sU + rm /tmp/tmp.7CpiKZdNWH /tmp/tmp.VykHWLP3sU + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/demand-backup-snapshot/compare/find.json /tmp/tmp.BOuuvUDXZF/find + log 'Data restored: OK' + set +o xtrace [2026-04-10T11:14:30+0000] Data restored: OK + destroy demand-backup-snapshot-17683 + local namespace=demand-backup-snapshot-17683 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.CcIVABiaOP +++ mktemp ++ local LAST_ERR=/tmp/tmp.yXTKxzg5JW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CcIVABiaOP ++ cat /tmp/tmp.yXTKxzg5JW ++ rm /tmp/tmp.CcIVABiaOP /tmp/tmp.yXTKxzg5JW ++ return 0 + '[' 2 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.RvpVBZPJJY ++ mktemp + local LAST_ERR=/tmp/tmp.McxDvgsgVJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RvpVBZPJJY NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-snapshot some-name external ready 30m 31m backup-snapshot-encrypted some-name external ready 8m4s 8m47s + cat /tmp/tmp.McxDvgsgVJ + rm /tmp/tmp.RvpVBZPJJY /tmp/tmp.McxDvgsgVJ + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.ts41cTrw4t ++ mktemp + local LAST_ERR=/tmp/tmp.PVyaXchKub + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ts41cTrw4t perconaservermongodbbackup.psmdb.percona.com "backup-snapshot" deleted from demand-backup-snapshot-17683 namespace perconaservermongodbbackup.psmdb.percona.com "backup-snapshot-encrypted" deleted from demand-backup-snapshot-17683 namespace + cat /tmp/tmp.PVyaXchKub + rm /tmp/tmp.ts41cTrw4t /tmp/tmp.PVyaXchKub + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.UVOOqArk5T ++ mktemp + local LAST_ERR=/tmp/tmp.H7WRqaUTDn + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UVOOqArk5T customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.H7WRqaUTDn + rm /tmp/tmp.UVOOqArk5T /tmp/tmp.H7WRqaUTDn + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3zCkk2h7BJ ++ mktemp + local LAST_ERR=/tmp/tmp.cUxGLNPtGV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3zCkk2h7BJ + cat /tmp/tmp.cUxGLNPtGV + rm /tmp/tmp.3zCkk2h7BJ /tmp/tmp.cUxGLNPtGV + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.fbZmd0BwHZ ++ mktemp + local LAST_ERR=/tmp/tmp.CXV52FoFQO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fbZmd0BwHZ + cat /tmp/tmp.CXV52FoFQO + rm /tmp/tmp.fbZmd0BwHZ /tmp/tmp.CXV52FoFQO + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' 
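
delete_crd strips finalizers so CRD deletion can complete: for each CRD named in deploy/crd.yaml it lists resources across all namespaces and patches metadata.finalizers to an empty list. Two warts are visible in the trace: grep's "stray \ before -" warnings come from the needlessly escaped '\-\-\-' pattern, and the bogus 'kubectl patch ... -n sh' calls happen because GNU xargs still runs its command once on empty input, so $0 falls back to "sh". A sketch of the loop with both fixed (grep -- and xargs -r):

    for crd_name in $(yq eval '.metadata.name' "$src_dir/deploy/crd.yaml" \
            | grep -v -- '---'); do
        # columns are: NAMESPACE NAME ...; $0/$1 in the inner shell pick them up
        kubectl get "$crd_name" --all-namespaces -o wide 2>/dev/null \
            | grep -v NAMESPACE \
            | xargs -r -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
            || true
        kubectl wait --for=delete crd "$crd_name"
    done
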
error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ZwMJOgd5mB ++ mktemp + local LAST_ERR=/tmp/tmp.wkAICE7Dhz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZwMJOgd5mB + cat /tmp/tmp.wkAICE7Dhz + rm /tmp/tmp.ZwMJOgd5mB /tmp/tmp.wkAICE7Dhz + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.qNM3TvqtAc ++ mktemp + local LAST_ERR=/tmp/tmp.hgJF3geG7K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qNM3TvqtAc clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.hgJF3geG7K + rm /tmp/tmp.qNM3TvqtAc /tmp/tmp.hgJF3geG7K + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.0GBD55QQTU ++ mktemp + local LAST_ERR=/tmp/tmp.cqRgg65ojm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.0GBD55QQTU + cat /tmp/tmp.cqRgg65ojm Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.0GBD55QQTU + cat /tmp/tmp.cqRgg65ojm Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.0GBD55QQTU + cat /tmp/tmp.cqRgg65ojm Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not 
+ sleep 8
+ cat /tmp/tmp.0GBD55QQTU
+ cat /tmp/tmp.cqRgg65ojm
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.0GBD55QQTU /tmp/tmp.cqRgg65ojm + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-snapshot-17683 + rm -rf /tmp/tmp.BOuuvUDXZF + kubectl_bin 
++ mktemp
++ mktemp
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.QoWQa7S2X2
++ mktemp
+ local LAST_OUT=/tmp/tmp.YTh1QnA0Pu
++ mktemp
+ local LAST_ERR=/tmp/tmp.YeV4sWJTOT
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.cTXf3jJNDB
+ local exit_status=0
+ local timeout=4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace demand-backup-snapshot-17683
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
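(Editor's note: the interleaved trace above shows the two namespace deletions running concurrently; two sets of LAST_OUT/LAST_ERR temp files are allocated before either kubectl call runs. A minimal sketch of that pattern, assuming plain shell job control rather than the suite's wrapper:)

# Force-delete both test namespaces in parallel; flags are taken from
# the log, the backgrounding and wait are assumptions.
kubectl delete --grace-period=0 --force=true namespace demand-backup-snapshot-17683 &
kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
wait   # block until both force-deletes return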