Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/demand-backup-incremental.log WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + create_infra demand-backup-incremental-13242 + local ns=demand-backup-incremental-13242 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.PzhhPnUEtO ++ mktemp + local LAST_ERR=/tmp/tmp.SJ7EA4ymkq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PzhhPnUEtO customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.SJ7EA4ymkq + rm /tmp/tmp.PzhhPnUEtO /tmp/tmp.SJ7EA4ymkq + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-14686 backup-azure-blob --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-14686 backup-gcp-cs --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-gcp-cs" not found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-14686 backup-minio --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-minio" not found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-14686 backup-minio-not-base --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-minio-not-base" not found + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Q34nSE0aJH ++ mktemp + local LAST_ERR=/tmp/tmp.0J1cB9mlTZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd 
perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Q34nSE0aJH customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.0J1cB9mlTZ + rm /tmp/tmp.Q34nSE0aJH /tmp/tmp.0J1cB9mlTZ + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.6fjT1S7bT6 ++ mktemp + local LAST_ERR=/tmp/tmp.WnTJRIWm18 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6fjT1S7bT6 + cat /tmp/tmp.WnTJRIWm18 + rm /tmp/tmp.6fjT1S7bT6 /tmp/tmp.WnTJRIWm18 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Lpchihe5Vm ++ mktemp + local LAST_ERR=/tmp/tmp.sf0TKspJKs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Lpchihe5Vm + cat /tmp/tmp.sf0TKspJKs + rm /tmp/tmp.Lpchihe5Vm /tmp/tmp.sf0TKspJKs + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.pIrKMnSq0d ++ mktemp + local LAST_ERR=/tmp/tmp.puMhlFCIfM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pIrKMnSq0d clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.puMhlFCIfM + rm /tmp/tmp.pIrKMnSq0d /tmp/tmp.puMhlFCIfM + return 0 + check_crd_for_deletion PR-1961-970eeaa4 + local git_tag=PR-1961-970eeaa4 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1961-970eeaa4/deploy/crd.yaml ++ yq eval .metadata.name 
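Nearly every kubectl call in this log goes through a kubectl_bin wrapper: stdout and stderr are captured to mktemp files, the command is retried up to three times with a growing pause, and the captured output is replayed before the temp files are removed. A minimal sketch reconstructed from the trace above (the real helper may differ in details such as the exact failure test):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break
            fi
            sleep $((timeout * i))   # matches the observed sleep 0, 4, 8 between attempts
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }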
++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KuRtS8hUAc +++ mktemp ++ local LAST_ERR=/tmp/tmp.GBc27W7HPo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KuRtS8hUAc ++ cat /tmp/tmp.GBc27W7HPo Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KuRtS8hUAc ++ cat /tmp/tmp.GBc27W7HPo Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.KuRtS8hUAc ++ cat /tmp/tmp.GBc27W7HPo Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.KuRtS8hUAc ++ cat /tmp/tmp.GBc27W7HPo Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.KuRtS8hUAc /tmp/tmp.GBc27W7HPo ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace 
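The destroy_chaos_mesh teardown above feeds `kubectl get ... | grep ... | awk '{print $1}'` results straight into `kubectl delete`, so an empty match produces `error: resource(s) were provided, but no name was specified`, which the script then swallows with `:`. (The earlier `crd/null` lookups are the same tolerate-and-continue idiom: the curl-plus-yq name list evaluated to the literal string "null", and the retry wrapper simply absorbs the three NotFound errors.) A quieter variant, assuming GNU xargs, skips the delete altogether when nothing matches:

    # delete chaos-mesh webhook configurations only if any exist;
    # xargs -r (no-run-if-empty) avoids the "no name was specified" error
    kubectl get mutatingwebhookconfiguration -o name 2>/dev/null \
        | grep chaos-mesh \
        | xargs -r -n1 timeout 30 kubectl delete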
----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.1OMHI5Y12V + local LAST_OUT=/tmp/tmp.9nV4fNQlez ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.oWUAhlDrkr + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.mmbD9HEkag + local exit_status=0 ++ seq 0 2 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1OMHI5Y12V + cat /tmp/tmp.oWUAhlDrkr + rm /tmp/tmp.1OMHI5Y12V /tmp/tmp.oWUAhlDrkr + return 0 namespace "demand-backup-incremental-14686" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9nV4fNQlez namespace "psmdb-operator" deleted + cat /tmp/tmp.mmbD9HEkag + rm /tmp/tmp.9nV4fNQlez /tmp/tmp.mmbD9HEkag + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jeeTTfvIcG ++ mktemp + local LAST_ERR=/tmp/tmp.xjOFo01Ss1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jeeTTfvIcG + cat /tmp/tmp.xjOFo01Ss1 + rm /tmp/tmp.jeeTTfvIcG /tmp/tmp.xjOFo01Ss1 + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.OiUyYtAsfI ++ mktemp + local LAST_ERR=/tmp/tmp.qD5hLse6qn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OiUyYtAsfI namespace/psmdb-operator created + cat /tmp/tmp.qD5hLse6qn + rm /tmp/tmp.OiUyYtAsfI /tmp/tmp.qD5hLse6qn + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.cYEAPdvbcw +++ mktemp ++ local LAST_ERR=/tmp/tmp.tmmo2RsvwQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cYEAPdvbcw ++ cat /tmp/tmp.tmmo2RsvwQ ++ rm /tmp/tmp.cYEAPdvbcw /tmp/tmp.tmmo2RsvwQ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster7 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.sLPEnjZnc3 ++ mktemp + local LAST_ERR=/tmp/tmp.t0cawc3tZH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + 
kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster7 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sLPEnjZnc3 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster7" modified. + cat /tmp/tmp.t0cawc3tZH + rm /tmp/tmp.sLPEnjZnc3 /tmp/tmp.t0cawc3tZH + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.BGoBAck8T4 ++ mktemp + local LAST_ERR=/tmp/tmp.lPGD9WOI0T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BGoBAck8T4 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.lPGD9WOI0T + rm /tmp/tmp.BGoBAck8T4 /tmp/tmp.lPGD9WOI0T + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.A9NFRwWmjg ++ mktemp + local LAST_ERR=/tmp/tmp.ItJNs2qWXW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A9NFRwWmjg clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.ItJNs2qWXW + rm /tmp/tmp.A9NFRwWmjg /tmp/tmp.ItJNs2qWXW + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1961-970eeaa4") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PTfV2tElOm ++ mktemp + local LAST_ERR=/tmp/tmp.LkHHVPdgv2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PTfV2tElOm deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.LkHHVPdgv2 + rm /tmp/tmp.PTfV2tElOm /tmp/tmp.LkHHVPdgv2 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.jVRpC5MbYo +++ mktemp ++ local LAST_ERR=/tmp/tmp.kh1Bki9nCx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jVRpC5MbYo ++ cat /tmp/tmp.kh1Bki9nCx ++ rm /tmp/tmp.jVRpC5MbYo /tmp/tmp.kh1Bki9nCx ++ return 0 + wait_pod percona-server-mongodb-operator-7cfd5675c9-q4n7t + local pod=percona-server-mongodb-operator-7cfd5675c9-q4n7t + set +o xtrace waiting for pod/percona-server-mongodb-operator-7cfd5675c9-q4n7t to be ready..OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fo6IioMuv1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RGCP4bOR9K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fo6IioMuv1 ++ cat /tmp/tmp.RGCP4bOR9K ++ rm /tmp/tmp.Fo6IioMuv1 /tmp/tmp.RGCP4bOR9K ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-7cfd5675c9-q4n7t ++ mktemp + local LAST_OUT=/tmp/tmp.A8aXPdCUly ++ mktemp + local LAST_ERR=/tmp/tmp.Sfvl71yKex + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs percona-server-mongodb-operator-7cfd5675c9-q4n7t + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A8aXPdCUly + cat /tmp/tmp.Sfvl71yKex + rm /tmp/tmp.A8aXPdCUly /tmp/tmp.Sfvl71yKex + return 0 2025-08-06T20:16:53.267Z INFO setup Manager starting up {"gitCommit": "970eeaa4b725700bdc63e3d7ec04ab0be1ecce33", "gitBranch": "PR-1961-970eeaa4", "buildTime": "", "goVersion": "go1.24.5", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-incremental-13242 + local namespace=demand-backup-incremental-13242 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete 
MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-incremental-13242' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-incremental-13242 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-incremental-13242 --ignore-not-found ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.NRplGV5Ez3 ++ mktemp + local LAST_OUT=/tmp/tmp.vGm7BWFXEC + local LAST_ERR=/tmp/tmp.tMUFaAS8Ve + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.pbNHSvpOMY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-incremental-13242 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NRplGV5Ez3 + cat /tmp/tmp.tMUFaAS8Ve + rm /tmp/tmp.NRplGV5Ez3 /tmp/tmp.tMUFaAS8Ve + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vGm7BWFXEC + cat /tmp/tmp.pbNHSvpOMY + rm /tmp/tmp.vGm7BWFXEC /tmp/tmp.pbNHSvpOMY + return 0 + kubectl_bin wait --for=delete namespace demand-backup-incremental-13242 ++ mktemp + local LAST_OUT=/tmp/tmp.x3H55VJdCr ++ mktemp + local LAST_ERR=/tmp/tmp.ubTq2bhBfF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace demand-backup-incremental-13242 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x3H55VJdCr + cat /tmp/tmp.ubTq2bhBfF + rm /tmp/tmp.x3H55VJdCr /tmp/tmp.ubTq2bhBfF + return 0 + desc 'create namespace demand-backup-incremental-13242' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-incremental-13242 ----------------------------------------------------------------------------------- + kubectl_bin create 
namespace demand-backup-incremental-13242 ++ mktemp + local LAST_OUT=/tmp/tmp.obUV1v9Gx1 ++ mktemp + local LAST_ERR=/tmp/tmp.wOe2ubJWtJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-incremental-13242 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.obUV1v9Gx1 namespace/demand-backup-incremental-13242 created + cat /tmp/tmp.wOe2ubJWtJ + rm /tmp/tmp.obUV1v9Gx1 /tmp/tmp.wOe2ubJWtJ + return 0 + set_kube_ctx demand-backup-incremental-13242 + local namespace=demand-backup-incremental-13242 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.zLFP9VHoUM +++ mktemp ++ local LAST_ERR=/tmp/tmp.7czlZlKVdS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zLFP9VHoUM ++ cat /tmp/tmp.7czlZlKVdS ++ rm /tmp/tmp.zLFP9VHoUM /tmp/tmp.7czlZlKVdS ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster7 --namespace=demand-backup-incremental-13242 ++ mktemp + local LAST_OUT=/tmp/tmp.tlAabw9ixU ++ mktemp + local LAST_ERR=/tmp/tmp.BraNGpiPWs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster7 --namespace=demand-backup-incremental-13242 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tlAabw9ixU Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-970eeaa4-5-cluster7" modified. + cat /tmp/tmp.BraNGpiPWs + rm /tmp/tmp.tlAabw9ixU /tmp/tmp.BraNGpiPWs + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Wed Aug 6 20:17:06 2025 NAMESPACE: demand-backup-incremental-13242 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-incremental-13242.cluster.local To access MinIO from localhost, run the below commands: 
1. export POD_NAME=$(kubectl get pods --namespace demand-backup-incremental-13242 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-incremental-13242 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-incremental-13242 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-incremental-13242 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cmp9j8L1Wj +++ mktemp ++ local LAST_ERR=/tmp/tmp.qFOkDu49yA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cmp9j8L1Wj ++ cat /tmp/tmp.qFOkDu49yA ++ rm /tmp/tmp.cmp9j8L1Wj /tmp/tmp.qFOkDu49yA ++ return 0 + MINIO_POD=minio-service-86dfccd949-lqgtx + wait_pod minio-service-86dfccd949-lqgtx + local pod=minio-service-86dfccd949-lqgtx + set +o xtrace waiting for pod/minio-service-86dfccd949-lqgtx to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-13242.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.o16QTtxTPq ++ mktemp + local LAST_ERR=/tmp/tmp.Km1clQEiBK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-13242.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o16QTtxTPq service/minio-service created + cat /tmp/tmp.Km1clQEiBK + rm /tmp/tmp.o16QTtxTPq /tmp/tmp.Km1clQEiBK + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.4BmzG5wEh6 ++ mktemp + local LAST_ERR=/tmp/tmp.ktVV5WBPs8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4BmzG5wEh6 pod "aws-cli" deleted + cat /tmp/tmp.ktVV5WBPs8 If you don't see a command prompt, try pressing enter. 
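The operator-testing bucket is created by a throwaway client pod rather than from the Jenkins host, since MinIO is only reachable inside the cluster; the ExternalName service created just before it makes the same minio-service name resolve from the psmdb-operator namespace, so the operator can reach the bucket during backups. The bucket-creation call, spelled out (the credentials are the users[0] values passed to the helm chart above):

    # one-shot pod: -i --rm --restart=Never runs the command once, streams its
    # output, and deletes the pod afterwards ("pod aws-cli deleted" in the log)
    kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
        bash -c 'AWS_ACCESS_KEY_ID=some-access-key \
                 AWS_SECRET_ACCESS_KEY=some-secret-key \
                 AWS_DEFAULT_REGION=us-east-1 \
                 aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'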
+ rm /tmp/tmp.4BmzG5wEh6 /tmp/tmp.ktVV5WBPs8 + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.KuYNZ1Qfi3 ++ mktemp + local LAST_ERR=/tmp/tmp.ua3RHD0Wf2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KuYNZ1Qfi3 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.ua3RHD0Wf2 + rm /tmp/tmp.KuYNZ1Qfi3 /tmp/tmp.ua3RHD0Wf2 + return 0 + desc 'Testing on not sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on not sharded cluster ----------------------------------------------------------------------------------- + log 'Creating PSMDB cluster' + set +o xtrace [2025-08-06T20:17:49+0000] Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4IkaalAGiq ++ mktemp + local LAST_ERR=/tmp/tmp.0uRHhXb6e2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4IkaalAGiq secret/some-users created + cat /tmp/tmp.0uRHhXb6e2 + rm /tmp/tmp.4IkaalAGiq /tmp/tmp.0uRHhXb6e2 + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-970eeaa4"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.S3WuiC7bev ++ mktemp + local LAST_ERR=/tmp/tmp.8K6aEFgGKP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S3WuiC7bev perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.8K6aEFgGKP + rm /tmp/tmp.S3WuiC7bev /tmp/tmp.8K6aEFgGKP + return 0 + kubectl_bin apply -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.paQhjiRZ3P ++ mktemp + local LAST_ERR=/tmp/tmp.A9VsNnPXE2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.paQhjiRZ3P deployment.apps/psmdb-client created + cat /tmp/tmp.A9VsNnPXE2 + rm /tmp/tmp.paQhjiRZ3P /tmp/tmp.A9VsNnPXE2 + return 0 + log 'check if all pods started' + set +o xtrace [2025-08-06T20:17:53+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready....OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cXUUYgfeW2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PgDUObgtHp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cXUUYgfeW2 ++ cat /tmp/tmp.PgDUObgtHp ++ rm /tmp/tmp.cXUUYgfeW2 /tmp/tmp.PgDUObgtHp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1CJqBRPM3y +++ mktemp ++ local LAST_ERR=/tmp/tmp.MtyplA2hhk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1CJqBRPM3y ++ cat /tmp/tmp.MtyplA2hhk ++ rm /tmp/tmp.1CJqBRPM3y /tmp/tmp.MtyplA2hhk ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YkZTTp71p0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OIx0iSx4dX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YkZTTp71p0 ++ cat /tmp/tmp.OIx0iSx4dX ++ rm /tmp/tmp.YkZTTp71p0 /tmp/tmp.OIx0iSx4dX ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DGQScJi0Aa +++ mktemp 
++ local LAST_ERR=/tmp/tmp.vu5tkOAAkT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DGQScJi0Aa ++ cat /tmp/tmp.vu5tkOAAkT ++ rm /tmp/tmp.DGQScJi0Aa /tmp/tmp.vu5tkOAAkT ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + log 'writing test data' + set +o xtrace [2025-08-06T20:19:35+0000] writing test data + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-13242 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2knGuDsCIW +++ mktemp ++ local LAST_ERR=/tmp/tmp.BnAwaPE2h3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2knGuDsCIW ++ cat /tmp/tmp.BnAwaPE2h3 ++ rm /tmp/tmp.2knGuDsCIW /tmp/tmp.BnAwaPE2h3 ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1ArIcUUazY ++ mktemp + local LAST_ERR=/tmp/tmp.bc6uRVr4MG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1ArIcUUazY Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ecaf6aa3-2cc0-4462-9cd0-88a1bb90d767") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.bc6uRVr4MG + rm /tmp/tmp.1ArIcUUazY /tmp/tmp.bc6uRVr4MG + return 0 + sleep 1 + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 
driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7FEHpM8by2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BjWoiIDFcl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7FEHpM8by2 ++ cat /tmp/tmp.BjWoiIDFcl ++ rm /tmp/tmp.7FEHpM8by2 /tmp/tmp.BjWoiIDFcl ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.p2jrjj3vnj ++ mktemp + local LAST_ERR=/tmp/tmp.qYXTsQajMI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p2jrjj3vnj Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0cf24270-0de1-4e3c-b544-b3ea0b13816b") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.qYXTsQajMI + rm /tmp/tmp.p2jrjj3vnj /tmp/tmp.qYXTsQajMI + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:19:45+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j02QfFVUas +++ mktemp ++ local LAST_ERR=/tmp/tmp.WUXPRUdLk5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j02QfFVUas ++ cat /tmp/tmp.WUXPRUdLk5 ++ rm /tmp/tmp.j02QfFVUas /tmp/tmp.WUXPRUdLk5 ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IxWADhE22V ++ mktemp + local LAST_ERR=/tmp/tmp.ZMCM0rnT5M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IxWADhE22V + cat /tmp/tmp.ZMCM0rnT5M + rm /tmp/tmp.IxWADhE22V /tmp/tmp.ZMCM0rnT5M + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:19:47+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TxtoacBcu2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.h1tabYgLAD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TxtoacBcu2 ++ cat /tmp/tmp.h1tabYgLAD ++ rm /tmp/tmp.TxtoacBcu2 /tmp/tmp.h1tabYgLAD ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8lMPdxoRey ++ mktemp + local LAST_ERR=/tmp/tmp.AnthXs2x6o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8lMPdxoRey + cat /tmp/tmp.AnthXs2x6o + rm /tmp/tmp.8lMPdxoRey /tmp/tmp.AnthXs2x6o + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:19:49+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qu3vLxRzel +++ mktemp ++ local LAST_ERR=/tmp/tmp.BZnlBy8Aa8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qu3vLxRzel ++ cat /tmp/tmp.BZnlBy8Aa8 ++ rm /tmp/tmp.Qu3vLxRzel /tmp/tmp.BZnlBy8Aa8 ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.FJeWsvfAqG ++ mktemp + local LAST_ERR=/tmp/tmp.rThZRBs8aJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FJeWsvfAqG + cat /tmp/tmp.rThZRBs8aJ + rm /tmp/tmp.FJeWsvfAqG /tmp/tmp.rThZRBs8aJ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + log 'running backups' + set +o xtrace [2025-08-06T20:19:52+0000] running backups + '[' -z '' ']' + backup_name_aws=backup-aws-s3 + backup_name_gcp=backup-gcp-cs + backup_name_azure=backup-azure-blob + run_backup aws-s3 backup-aws-s3 + local storage=aws-s3 + local backup_name=backup-aws-s3 + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/name:/name: backup-aws-s3/' + yq '.spec.type="incremental-base"' + /usr/bin/sed -e 's/storageName:/storageName: aws-s3/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.NI9dEg8fqM ++ mktemp + local LAST_ERR=/tmp/tmp.S1MemzEjEJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NI9dEg8fqM perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 created + cat /tmp/tmp.S1MemzEjEJ + rm /tmp/tmp.NI9dEg8fqM /tmp/tmp.S1MemzEjEJ + return 
0 + run_backup gcp-cs backup-gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/storageName:/storageName: gcp-cs/' + /usr/bin/sed -e 's/name:/name: backup-gcp-cs/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Tpswos94mP ++ mktemp + local LAST_ERR=/tmp/tmp.HkIWj42bxy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tpswos94mP perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.HkIWj42bxy + rm /tmp/tmp.Tpswos94mP /tmp/tmp.HkIWj42bxy + return 0 + run_backup azure-blob backup-azure-blob + local storage=azure-blob + local backup_name=backup-azure-blob + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/name:/name: backup-azure-blob/' + /usr/bin/sed -e 's/storageName:/storageName: azure-blob/' + kubectl_bin apply -f - + yq '.spec.type="incremental-base"' ++ mktemp + local LAST_OUT=/tmp/tmp.jSFRPyamQI ++ mktemp + local LAST_ERR=/tmp/tmp.9q5NxP04ni + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jSFRPyamQI perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob created + cat /tmp/tmp.9q5NxP04ni + rm /tmp/tmp.jSFRPyamQI /tmp/tmp.9q5NxP04ni + return 0 + wait_backup backup-aws-s3 + local backup_name=backup-aws-s3 + local target_state=ready + set +o xtrace waiting for backup-aws-s3 to reach ready state.......... 
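run_backup stamps a name, a storageName, and the backup type onto the shared backup.yml template with a mix of sed and yq before piping it to kubectl apply. The same mutation can be written in yq alone; a sketch, assuming the template's empty name: and storageName: keys live at .metadata.name and .spec.storageName:

    yq '.metadata.name = "backup-aws-s3"
        | .spec.storageName = "aws-s3"
        | .spec.type = "incremental-base"' \
        e2e-tests/demand-backup-incremental/conf/backup.yml \
    | kubectl apply -f -

When run_backup is called with base=false, the `[[ true == \t\r\u\e ]]` guard is skipped and .spec.type stays plain "incremental"; that is how non-base objects such as the leftover backup-minio-not-base seen in the cleanup at the top get produced.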
+ check_backup_in_storage backup-aws-s3 s3 rs0 + local backup=backup-aws-s3 + local storage_type=s3 + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=s3.amazonaws.com ++ get_backup_dest backup-aws-s3 ++ local backup_name=backup-aws-s3 ++ /usr/bin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ sed -e 's/.json$//' ++ kubectl_bin get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EfIo7qD7lM +++ mktemp ++ local LAST_ERR=/tmp/tmp.KvvGj74T3W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EfIo7qD7lM ++ cat /tmp/tmp.KvvGj74T3W ++ rm /tmp/tmp.EfIo7qD7lM /tmp/tmp.KvvGj74T3W ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:19:54Z + [[ s3 == \m\i\n\i\o ]] + local url=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:19:54Z/rs0/filelist.pbm + log 'checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:19:54Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-06T20:20:15+0000] checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:19:54Z/rs0/filelist.pbm exists + curl --fail --head https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:19:54Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 13575 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK x-amz-id-2: oyK0UNqbjx/6UTeGvMM27z0EMjhwizTEVx5myb7tuboFhfwpYeKxmSTehpN6q/Xqe1uIwU7rHVvVOVryvp+ymZDx51X7jcVCo9cY853bgqs= x-amz-request-id: 69SNF2VV3R2RH3YZ Date: Wed, 06 Aug 2025 20:20:17 GMT Last-Modified: Wed, 06 Aug 2025 20:20:12 GMT x-amz-expiration: expiry-date="Fri, 08 Aug 2025 00:00:00 GMT", rule-id="1 Days Cleanup" ETag: "0529d3e7d142ffe9bdf96e783ca99c94" x-amz-server-side-encryption: AES256 Accept-Ranges: bytes Content-Type: application/octet-stream Content-Length: 13575 Server: AmazonS3 + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + local target_state=ready + set +o xtrace waiting for backup-gcp-cs to reach ready state............ 
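check_backup_in_storage maps the storage type to a public endpoint (s3.amazonaws.com here; storage.googleapis.com and the Azure blob host in the next two checks), joins it with the backup's destination, and probes the PBM file list. The core of the check:

    url="https://s3.amazonaws.com/${backup_dest}/rs0/filelist.pbm"
    # --head sends a HEAD request so nothing is downloaded;
    # --fail converts any HTTP status >= 400 into a non-zero exit,
    # which makes a missing object fail the test immediately
    curl --fail --head "$url"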
+ check_backup_in_storage backup-gcp-cs gcs rs0 + local backup=backup-gcp-cs + local storage_type=gcs + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=storage.googleapis.com ++ get_backup_dest backup-gcp-cs ++ local backup_name=backup-gcp-cs ++ /usr/bin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ sed -e 's/.json$//' ++ kubectl_bin get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ sed 's|azure://||' ++ sed 's|s3://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MpKlEFMhvx +++ mktemp ++ local LAST_ERR=/tmp/tmp.xR0mCw9FkB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MpKlEFMhvx ++ cat /tmp/tmp.xR0mCw9FkB ++ rm /tmp/tmp.MpKlEFMhvx /tmp/tmp.xR0mCw9FkB ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:16Z + [[ gcs == \m\i\n\i\o ]] + local url=https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:16Z/rs0/filelist.pbm + log 'checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:16Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-06T20:20:38+0000] checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:16Z/rs0/filelist.pbm exists + curl --fail --head https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:16Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 14375 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/2 200 content-type: application/octet-stream x-guploader-uploadid: ABgVH8_WgXt0p3W8k7s9LucI0Znbtuf3taaxznd5CYQUoN3tVeS7rfRVRW9BGEPJ-10eONwS expires: Wed, 06 Aug 2025 21:20:38 GMT date: Wed, 06 Aug 2025 20:20:38 GMT cache-control: public, max-age=3600 last-modified: Wed, 06 Aug 2025 20:20:34 GMT etag: "7a27431916ed778da50d56c261a137a1" x-goog-generation: 1754511634524621 x-goog-metageneration: 1 x-goog-stored-content-encoding: identity x-goog-stored-content-length: 14375 x-goog-hash: crc32c=0f8HVA== x-goog-hash: md5=eidDGRbtd42lDVbCYaE3oQ== x-goog-expiration: Thu, 07 Aug 2025 20:20:34 GMT x-goog-storage-class: REGIONAL accept-ranges: bytes content-length: 14375 server: UploadServer alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + wait_backup backup-azure-blob + local backup_name=backup-azure-blob + local target_state=ready + set +o xtrace waiting for backup-azure-blob to reach ready state........... 
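The dotted "waiting for ... to reach ready state" lines are printed from inside wait_backup after set +o xtrace hides the loop itself; a plausible reconstruction of that poll, assuming the backup CR exposes .status.state like the psmdb cluster object polled later in the trace (the timeout bound is illustrative):

# Poll a psmdb-backup CR until .status.state equals the target state.
wait_backup_sketch() {
  local backup_name=$1 target_state=${2:-ready}
  local retry=0
  echo -n "waiting for ${backup_name} to reach ${target_state} state"
  until [[ $(kubectl get psmdb-backup "${backup_name}" \
      -o 'jsonpath={.status.state}' 2>/dev/null) == "${target_state}" ]]; do
    (( ++retry > 120 )) && { echo " timeout"; return 1; }  # assumed bound: ~10 min of 5s polls
    echo -n .
    sleep 5
  done
  echo
}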
+ check_backup_in_storage backup-azure-blob azure rs0 + local backup=backup-azure-blob + local storage_type=azure + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net ++ get_backup_dest backup-azure-blob ++ local backup_name=backup-azure-blob ++ /usr/bin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|azure://||' ++ sed 's|s3://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6PQq9MN9pX +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZL2b8EWXz6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6PQq9MN9pX ++ cat /tmp/tmp.ZL2b8EWXz6 ++ rm /tmp/tmp.6PQq9MN9pX /tmp/tmp.ZL2b8EWXz6 ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:38Z + [[ azure == \m\i\n\i\o ]] + local url=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:38Z/rs0/filelist.pbm + log 'checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:38Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-06T20:20:59+0000] checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:38Z/rs0/filelist.pbm exists + curl --fail --head https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:38Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 15175 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK Content-Length: 15175 Content-Type: application/octet-stream Content-MD5: Q9m3TBogyFxrvrrCVLDlaQ== Last-Modified: Wed, 06 Aug 2025 20:20:55 GMT ETag: 0x8DDD526BF636A53 Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 x-ms-request-id: 345ee96a-e01e-002e-6f0f-078d78000000 x-ms-version: 2009-09-19 x-ms-lease-status: unlocked x-ms-blob-type: BlockBlob Date: Wed, 06 Aug 2025 20:20:59 GMT + backup_name_minio=backup-minio + run_backup minio backup-minio + local storage=minio + local backup_name=backup-minio + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/name:/name: backup-minio/' + /usr/bin/sed -e 's/storageName:/storageName: minio/' + kubectl_bin apply -f - + yq '.spec.type="incremental-base"' ++ mktemp + local LAST_OUT=/tmp/tmp.A89V89YoaV ++ mktemp + local LAST_ERR=/tmp/tmp.tR6sAGFHq2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A89V89YoaV perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.tR6sAGFHq2 + rm /tmp/tmp.A89V89YoaV /tmp/tmp.tR6sAGFHq2 + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state...... 
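minio is only reachable in-cluster, so the backup-minio check below swaps curl for a throwaway aws-cli pod that lists the object through the S3 API, retrying because an attached pod's output can come back empty ("If you don't see a command prompt, try pressing enter."), as happens once further down. A sketch mirroring the credentials, endpoint, and 60-try bound visible in the trace:

# List a minio-hosted backup file from inside the cluster; retry empty attaches.
check_minio_backup_sketch() {
  local dest=$1 replset=${2:-rs0} file=${3:-filelist.pbm}
  local retry=0
  until kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
      /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key \
      AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
      /usr/bin/aws --endpoint-url http://minio-service:9000 \
      s3 ls "s3://${dest}/${replset}/${file}" | grep -q "${file}"; do
    (( ++retry >= 60 )) && return 1  # same 60-attempt limit as the trace's retry loop
    sleep 1
  done
}
# e.g. check_minio_backup_sketch operator-testing/2025-08-06T20:21:02Z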
+ check_backup_in_storage backup-minio minio rs0 + local backup=backup-minio + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ /usr/bin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ sed -e 's/.json$//' ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AkxcLtG0L0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VnG5biyS0s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AkxcLtG0L0 ++ cat /tmp/tmp.VnG5biyS0s ++ rm /tmp/tmp.AkxcLtG0L0 /tmp/tmp.VnG5biyS0s ++ return 0 + backup_dest=operator-testing/2025-08-06T20:21:02Z + [[ minio == \m\i\n\i\o ]] + grep filelist.pbm + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:21:02Z/rs0/filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.nOutIq0MiH ++ mktemp + local LAST_ERR=/tmp/tmp.m2YJ8hrn5V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:21:02Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nOutIq0MiH + cat /tmp/tmp.m2YJ8hrn5V + rm /tmp/tmp.nOutIq0MiH /tmp/tmp.m2YJ8hrn5V + return 0 2025-08-06 20:21:09 13661 filelist.pbm + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HNlGrGwEJj +++ mktemp ++ local LAST_ERR=/tmp/tmp.K8EN7UDxpd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HNlGrGwEJj ++ cat /tmp/tmp.K8EN7UDxpd ++ rm /tmp/tmp.HNlGrGwEJj /tmp/tmp.K8EN7UDxpd ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.XktesGJz3k ++ mktemp + local LAST_ERR=/tmp/tmp.EO2ZGqxhWX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 
'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XktesGJz3k Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ab9f2658-a92b-41a0-95c3-52b880f10fc9") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.EO2ZGqxhWX + rm /tmp/tmp.XktesGJz3k /tmp/tmp.EO2ZGqxhWX + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:21:25+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb '' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XMvDkimsDn +++ mktemp ++ local LAST_ERR=/tmp/tmp.FwnrTBHwJb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XMvDkimsDn ++ cat /tmp/tmp.FwnrTBHwJb ++ rm /tmp/tmp.XMvDkimsDn /tmp/tmp.FwnrTBHwJb ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JHcjxSBIox ++ mktemp + local LAST_ERR=/tmp/tmp.Hj9SLiTK1g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 
-a -n 1 ']' + break + cat /tmp/tmp.JHcjxSBIox + cat /tmp/tmp.Hj9SLiTK1g + rm /tmp/tmp.JHcjxSBIox /tmp/tmp.Hj9SLiTK1g + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.U3H0MN62p3/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:21:27+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ysO0Ql51VP +++ mktemp ++ local LAST_ERR=/tmp/tmp.IXs0NOsRhQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ysO0Ql51VP ++ cat /tmp/tmp.IXs0NOsRhQ ++ rm /tmp/tmp.ysO0Ql51VP /tmp/tmp.IXs0NOsRhQ ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.5WurukAoCz ++ mktemp + local LAST_ERR=/tmp/tmp.KeEjYrcsCE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5WurukAoCz + cat /tmp/tmp.KeEjYrcsCE + rm /tmp/tmp.5WurukAoCz /tmp/tmp.KeEjYrcsCE + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.U3H0MN62p3/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:21:29+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wVibnnky84 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KhrWozwSxP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wVibnnky84 ++ cat /tmp/tmp.KhrWozwSxP ++ rm /tmp/tmp.wVibnnky84 /tmp/tmp.KhrWozwSxP ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8BdM2SEnBt ++ mktemp + local LAST_ERR=/tmp/tmp.9AFfqvVfI7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8BdM2SEnBt + cat /tmp/tmp.9AFfqvVfI7 + rm /tmp/tmp.8BdM2SEnBt /tmp/tmp.9AFfqvVfI7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.U3H0MN62p3/find-not-base + backup_name_minio_not_base=backup-minio-not-base + run_backup minio backup-minio-not-base false + local storage=minio + local backup_name=backup-minio-not-base + local base=false + local backup_type=incremental + [[ false == \t\r\u\e ]] + /usr/bin/sed -e 's/name:/name: backup-minio-not-base/' + /usr/bin/sed -e 's/storageName:/storageName: minio/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/backup.yml + yq '.spec.type="incremental"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.lpE5zNVill ++ mktemp + local LAST_ERR=/tmp/tmp.B96jm5YMvA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lpE5zNVill perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base created + cat /tmp/tmp.B96jm5YMvA + rm /tmp/tmp.lpE5zNVill /tmp/tmp.B96jm5YMvA + return 0 + wait_backup backup-minio-not-base + local backup_name=backup-minio-not-base + local target_state=ready + set +o xtrace waiting 
for backup-minio-not-base to reach ready state...... + check_backup_in_storage backup-minio-not-base minio rs0 + local backup=backup-minio-not-base + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio-not-base ++ local backup_name=backup-minio-not-base ++ /usr/bin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio-not-base -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|azure://||' ++ sed 's|s3://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BWJB6XnYeq +++ mktemp ++ local LAST_ERR=/tmp/tmp.SalZmKwOq9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio-not-base -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BWJB6XnYeq ++ cat /tmp/tmp.SalZmKwOq9 ++ rm /tmp/tmp.BWJB6XnYeq /tmp/tmp.SalZmKwOq9 ++ return 0 + backup_dest=operator-testing/2025-08-06T20:21:33Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:21:33Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.NqzNc6mvRA ++ mktemp + local LAST_ERR=/tmp/tmp.HeELAHuWiY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:21:33Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NqzNc6mvRA + cat /tmp/tmp.HeELAHuWiY If you don't see a command prompt, try pressing enter. 
+ rm /tmp/tmp.NqzNc6mvRA /tmp/tmp.HeELAHuWiY + return 0 + sleep 1 + let retry+=1 + '[' 7 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:21:33Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.Am7gTH7Q6u ++ mktemp + local LAST_ERR=/tmp/tmp.2u0I7WXg5X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:21:33Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Am7gTH7Q6u + cat /tmp/tmp.2u0I7WXg5X + rm /tmp/tmp.Am7gTH7Q6u /tmp/tmp.2u0I7WXg5X + return 0 2025-08-06 20:21:39 13661 filelist.pbm + '[' -z '' ']' + run_restore backup-aws-s3 + local backup_name=backup-aws-s3 + log 'drop collection' + set +o xtrace [2025-08-06T20:21:54+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Pnpjg0FRVW +++ mktemp ++ local LAST_ERR=/tmp/tmp.ewoxNEEkHM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Pnpjg0FRVW ++ cat /tmp/tmp.ewoxNEEkHM ++ rm /tmp/tmp.Pnpjg0FRVW /tmp/tmp.ewoxNEEkHM ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.AFGh5OHbi8 ++ mktemp + local LAST_ERR=/tmp/tmp.0RnAwxjWuE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AFGh5OHbi8 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("24dbf541-c639-427a-969d-c0cbde43e7f6") } Percona Server for MongoDB server version: 
v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.0RnAwxjWuE + rm /tmp/tmp.AFGh5OHbi8 /tmp/tmp.0RnAwxjWuE + return 0 + log 'check backup and restore -- backup-aws-s3' + set +o xtrace [2025-08-06T20:21:56+0000] check backup and restore -- backup-aws-s3 + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-aws-s3/' + /usr/bin/sed -e 's/name:/name: restore-backup-aws-s3/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.tTiyUHvCCy ++ mktemp + local LAST_ERR=/tmp/tmp.lFBz3h45U8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tTiyUHvCCy perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3 created + cat /tmp/tmp.lFBz3h45U8 + rm /tmp/tmp.tTiyUHvCCy /tmp/tmp.lFBz3h45U8 + return 0 + run_recovery_check backup-aws-s3 + local backup_name=backup-aws-s3 + local compare_suffix=_restore + local base=true + wait_restore backup-aws-s3 some-name requested 0 3000 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "requested" .OK after 1 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-13242", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.WikuRATF9C ++ mktemp + local LAST_ERR=/tmp/tmp.Hqi9K2Yuhp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WikuRATF9C + cat /tmp/tmp.Hqi9K2Yuhp + rm /tmp/tmp.WikuRATF9C /tmp/tmp.Hqi9K2Yuhp + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + wait_restore backup-aws-s3 some-name ready 0 1800 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a2tP9emEwH +++ mktemp ++ local LAST_ERR=/tmp/tmp.WD8xInLtuD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a2tP9emEwH ++ cat /tmp/tmp.WD8xInLtuD ++ rm /tmp/tmp.a2tP9emEwH /tmp/tmp.WD8xInLtuD ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7xrOE8CBxL +++ mktemp ++ local LAST_ERR=/tmp/tmp.cYqLoptS4P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ 
cat /tmp/tmp.7xrOE8CBxL ++ cat /tmp/tmp.cYqLoptS4P ++ rm /tmp/tmp.7xrOE8CBxL /tmp/tmp.cYqLoptS4P ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yna37aARK7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wPAmMVJH8w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yna37aARK7 ++ cat /tmp/tmp.wPAmMVJH8w ++ rm /tmp/tmp.yna37aARK7 /tmp/tmp.wPAmMVJH8w ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VQEQuAf8Jd +++ mktemp ++ local LAST_ERR=/tmp/tmp.xWoegxCAL2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VQEQuAf8Jd ++ cat /tmp/tmp.xWoegxCAL2 ++ rm /tmp/tmp.VQEQuAf8Jd /tmp/tmp.xWoegxCAL2 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c1Z1zI2I15 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VwxK3ZBIi4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c1Z1zI2I15 ++ cat /tmp/tmp.VwxK3ZBIi4 ++ rm /tmp/tmp.c1Z1zI2I15 /tmp/tmp.VwxK3ZBIi4 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OhOcwphOQD +++ mktemp ++ local LAST_ERR=/tmp/tmp.hB5uGUvCD7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OhOcwphOQD ++ cat /tmp/tmp.hB5uGUvCD7 ++ rm /tmp/tmp.OhOcwphOQD /tmp/tmp.hB5uGUvCD7 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UIMqLe6Xsl +++ mktemp ++ local LAST_ERR=/tmp/tmp.ud4DejQsjX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UIMqLe6Xsl ++ cat /tmp/tmp.ud4DejQsjX ++ rm /tmp/tmp.UIMqLe6Xsl /tmp/tmp.ud4DejQsjX ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pizCZ8Y7cB +++ mktemp ++ local LAST_ERR=/tmp/tmp.7jPxrkYSCp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pizCZ8Y7cB ++ cat /tmp/tmp.7jPxrkYSCp ++ rm /tmp/tmp.pizCZ8Y7cB /tmp/tmp.7jPxrkYSCp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TY5MJT1LJv +++ mktemp ++ local LAST_ERR=/tmp/tmp.j0drIsbsPj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TY5MJT1LJv ++ cat /tmp/tmp.j0drIsbsPj ++ rm /tmp/tmp.TY5MJT1LJv /tmp/tmp.j0drIsbsPj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dOBzz4QXm7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.e2CZrv3x58 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dOBzz4QXm7 ++ cat /tmp/tmp.e2CZrv3x58 ++ rm /tmp/tmp.dOBzz4QXm7 /tmp/tmp.e2CZrv3x58 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zbTzhzKDjr +++ mktemp ++ local LAST_ERR=/tmp/tmp.cbLsusHmEg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zbTzhzKDjr ++ cat /tmp/tmp.cbLsusHmEg ++ rm /tmp/tmp.zbTzhzKDjr /tmp/tmp.cbLsusHmEg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EaHEIBBRIC +++ mktemp ++ local LAST_ERR=/tmp/tmp.mSb5DrHcC3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EaHEIBBRIC ++ cat /tmp/tmp.mSb5DrHcC3 ++ rm /tmp/tmp.EaHEIBBRIC /tmp/tmp.mSb5DrHcC3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.swnV8PtxzT +++ mktemp ++ local LAST_ERR=/tmp/tmp.nzwKJmbDoZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.swnV8PtxzT ++ cat /tmp/tmp.nzwKJmbDoZ ++ rm /tmp/tmp.swnV8PtxzT /tmp/tmp.nzwKJmbDoZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CK4OjbuOD9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rMjxvPnxMX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CK4OjbuOD9 ++ cat /tmp/tmp.rMjxvPnxMX ++ rm /tmp/tmp.CK4OjbuOD9 /tmp/tmp.rMjxvPnxMX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BzIk7b3wA9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.d0CdVcpcus ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BzIk7b3wA9 ++ cat /tmp/tmp.d0CdVcpcus ++ rm /tmp/tmp.BzIk7b3wA9 /tmp/tmp.d0CdVcpcus ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:28:05+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k4GwzAWfR3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IjSOMGGz0w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k4GwzAWfR3 ++ cat /tmp/tmp.IjSOMGGz0w ++ rm /tmp/tmp.k4GwzAWfR3 /tmp/tmp.IjSOMGGz0w ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.VIx9OXkHeg ++ mktemp + local LAST_ERR=/tmp/tmp.8BREiZYaeK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use 
myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VIx9OXkHeg + cat /tmp/tmp.8BREiZYaeK + rm /tmp/tmp.VIx9OXkHeg /tmp/tmp.8BREiZYaeK + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:28:07+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q18w9UIwhm +++ mktemp ++ local LAST_ERR=/tmp/tmp.tB7mM4rb3k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q18w9UIwhm ++ cat /tmp/tmp.tB7mM4rb3k ++ rm /tmp/tmp.q18w9UIwhm /tmp/tmp.tB7mM4rb3k ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fWQnEvI6ip ++ mktemp + local LAST_ERR=/tmp/tmp.wYK4nGgKqu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fWQnEvI6ip + cat /tmp/tmp.wYK4nGgKqu + rm /tmp/tmp.fWQnEvI6ip /tmp/tmp.wYK4nGgKqu + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local 
database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:28:09+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m1X7h6Y9Pc +++ mktemp ++ local LAST_ERR=/tmp/tmp.f9lFGLdeAs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m1X7h6Y9Pc ++ cat /tmp/tmp.f9lFGLdeAs ++ rm /tmp/tmp.m1X7h6Y9Pc /tmp/tmp.f9lFGLdeAs ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zWVmanZ9s9 ++ mktemp + local LAST_ERR=/tmp/tmp.RH2VVx1eoF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zWVmanZ9s9 + cat /tmp/tmp.RH2VVx1eoF + rm /tmp/tmp.zWVmanZ9s9 /tmp/tmp.RH2VVx1eoF + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + run_restore backup-gcp-cs + local backup_name=backup-gcp-cs + log 'drop collection' + set +o xtrace [2025-08-06T20:28:11+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dIMLxsz932 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ExMdt0lEKI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dIMLxsz932 ++ cat /tmp/tmp.ExMdt0lEKI ++ rm /tmp/tmp.dIMLxsz932 /tmp/tmp.ExMdt0lEKI ++ return 0 + local 
client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.OWeuQuT4Kb ++ mktemp + local LAST_ERR=/tmp/tmp.RqsFCv7yLH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OWeuQuT4Kb Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4f39c91e-87e1-4884-a3cd-64af2927d6e3") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.RqsFCv7yLH + rm /tmp/tmp.OWeuQuT4Kb /tmp/tmp.RqsFCv7yLH + return 0 + log 'check backup and restore -- backup-gcp-cs' + set +o xtrace [2025-08-06T20:28:13+0000] check backup and restore -- backup-gcp-cs + /usr/bin/sed -e 's/name:/name: restore-backup-gcp-cs/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-gcp-cs/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.pvcAb0GVHK ++ mktemp + local LAST_ERR=/tmp/tmp.PfkAM9ddr4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pvcAb0GVHK perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs created + cat /tmp/tmp.PfkAM9ddr4 + rm /tmp/tmp.pvcAb0GVHK /tmp/tmp.PfkAM9ddr4 + return 0 + run_recovery_check backup-gcp-cs + local backup_name=backup-gcp-cs + local compare_suffix=_restore + local base=true + wait_restore backup-gcp-cs some-name requested 0 3000 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "requested" .OK after 1 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml 
statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-13242", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.c7IsYBFSlz ++ mktemp + local LAST_ERR=/tmp/tmp.y9XEwG1KKS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c7IsYBFSlz + cat /tmp/tmp.y9XEwG1KKS + rm /tmp/tmp.c7IsYBFSlz /tmp/tmp.y9XEwG1KKS + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + wait_restore backup-gcp-cs some-name ready 0 1800 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bmXfcXKMrC +++ mktemp ++ local LAST_ERR=/tmp/tmp.eOWi6kYNHl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bmXfcXKMrC ++ cat /tmp/tmp.eOWi6kYNHl ++ rm /tmp/tmp.bmXfcXKMrC /tmp/tmp.eOWi6kYNHl ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u3KwN357fU +++ mktemp ++ local LAST_ERR=/tmp/tmp.x1KyDd7kqj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u3KwN357fU ++ cat /tmp/tmp.x1KyDd7kqj ++ rm /tmp/tmp.u3KwN357fU /tmp/tmp.x1KyDd7kqj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.umzyqnMSgL +++ mktemp ++ local LAST_ERR=/tmp/tmp.smSimwUe94 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.umzyqnMSgL ++ cat /tmp/tmp.smSimwUe94 ++ rm /tmp/tmp.umzyqnMSgL /tmp/tmp.smSimwUe94 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JgsKFOPoCM +++ mktemp ++ local LAST_ERR=/tmp/tmp.U03dE5d12Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JgsKFOPoCM ++ cat /tmp/tmp.U03dE5d12Y ++ rm /tmp/tmp.JgsKFOPoCM /tmp/tmp.U03dE5d12Y ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zrEz12NHSj +++ mktemp ++ local LAST_ERR=/tmp/tmp.yiy83iVDQy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zrEz12NHSj ++ cat /tmp/tmp.yiy83iVDQy ++ rm /tmp/tmp.zrEz12NHSj /tmp/tmp.yiy83iVDQy ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C2VfqNRFWl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mh7AX3cg9w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C2VfqNRFWl ++ cat /tmp/tmp.Mh7AX3cg9w ++ rm /tmp/tmp.C2VfqNRFWl /tmp/tmp.Mh7AX3cg9w ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mciNHiba1e +++ mktemp ++ local LAST_ERR=/tmp/tmp.dXCzkRFuIX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mciNHiba1e ++ cat /tmp/tmp.dXCzkRFuIX ++ rm /tmp/tmp.mciNHiba1e /tmp/tmp.dXCzkRFuIX ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ab8XSyP1MG +++ mktemp ++ local LAST_ERR=/tmp/tmp.qgU3vDdUR7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ab8XSyP1MG ++ cat /tmp/tmp.qgU3vDdUR7 ++ rm /tmp/tmp.Ab8XSyP1MG /tmp/tmp.qgU3vDdUR7 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zi8K1fQPpI +++ mktemp ++ local LAST_ERR=/tmp/tmp.9msvWDtSkm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zi8K1fQPpI ++ cat /tmp/tmp.9msvWDtSkm ++ rm /tmp/tmp.zi8K1fQPpI /tmp/tmp.9msvWDtSkm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
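
Each of these polls, like every other kubectl call in this log, goes through a kubectl_bin wrapper whose skeleton is fully visible in the trace: two mktemp files capture stdout and stderr, the command is attempted up to three times (seq 0 2), and the captured output is replayed and cleaned up afterwards. A minimal reconstruction follows; the sleep between failed attempts is an assumption, since every call in this run succeeds on the first try:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                 # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep "$timeout"                    # assumed back-off between retries
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"                         # the trace replays stderr the same way
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

This is what produces the /tmp/tmp.* pairs that precede and follow every command in the log.
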
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3XftsnRVxE +++ mktemp ++ local LAST_ERR=/tmp/tmp.0GhIkxdjcH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3XftsnRVxE ++ cat /tmp/tmp.0GhIkxdjcH ++ rm /tmp/tmp.3XftsnRVxE /tmp/tmp.0GhIkxdjcH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cwiMMNjG1r +++ mktemp ++ local LAST_ERR=/tmp/tmp.foJmwZ6tum ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cwiMMNjG1r ++ cat /tmp/tmp.foJmwZ6tum ++ rm /tmp/tmp.cwiMMNjG1r /tmp/tmp.foJmwZ6tum ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jPWlFQA1AF +++ mktemp ++ local LAST_ERR=/tmp/tmp.gWd4eBOcxD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jPWlFQA1AF ++ cat /tmp/tmp.gWd4eBOcxD ++ rm /tmp/tmp.jPWlFQA1AF /tmp/tmp.gWd4eBOcxD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FhGbCZ8nie +++ mktemp ++ local LAST_ERR=/tmp/tmp.8zl5O2biED ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FhGbCZ8nie ++ cat /tmp/tmp.8zl5O2biED ++ rm /tmp/tmp.FhGbCZ8nie /tmp/tmp.8zl5O2biED ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:34:04+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z4deeiNZfl +++ mktemp ++ local LAST_ERR=/tmp/tmp.2NRABd522M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z4deeiNZfl ++ cat /tmp/tmp.2NRABd522M ++ rm /tmp/tmp.Z4deeiNZfl /tmp/tmp.2NRABd522M ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.dShtKTXZiX ++ mktemp + local LAST_ERR=/tmp/tmp.f6xZkshL9w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dShtKTXZiX + cat /tmp/tmp.f6xZkshL9w + rm /tmp/tmp.dShtKTXZiX /tmp/tmp.f6xZkshL9w + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:34:06+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GG1J77lEGE +++ mktemp ++ local LAST_ERR=/tmp/tmp.gJmAEA0g39 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GG1J77lEGE ++ cat /tmp/tmp.gJmAEA0g39 ++ rm /tmp/tmp.GG1J77lEGE /tmp/tmp.gJmAEA0g39 ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wvu5RVDiAB ++ mktemp + local LAST_ERR=/tmp/tmp.kKqDyUletq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wvu5RVDiAB + cat /tmp/tmp.kKqDyUletq + rm /tmp/tmp.wvu5RVDiAB /tmp/tmp.kKqDyUletq + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:34:07+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UT95D3xlT4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q87k1J4OSr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UT95D3xlT4 ++ cat /tmp/tmp.Q87k1J4OSr ++ rm /tmp/tmp.UT95D3xlT4 /tmp/tmp.Q87k1J4OSr ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jmKW9Rg4gD ++ mktemp + local LAST_ERR=/tmp/tmp.rPyLxFFL9l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jmKW9Rg4gD + cat /tmp/tmp.rPyLxFFL9l + rm /tmp/tmp.jmKW9Rg4gD /tmp/tmp.rPyLxFFL9l + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + run_restore backup-azure-blob + local backup_name=backup-azure-blob + log 'drop collection' + set +o xtrace [2025-08-06T20:34:09+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.31K68lnhYF +++ mktemp ++ local LAST_ERR=/tmp/tmp.n9SFcbqtYm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.31K68lnhYF ++ cat /tmp/tmp.n9SFcbqtYm ++ rm /tmp/tmp.31K68lnhYF /tmp/tmp.n9SFcbqtYm ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.g6hHROJwVZ ++ mktemp + local LAST_ERR=/tmp/tmp.npumpr3F0X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g6hHROJwVZ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("34580709-347e-4d26-9e13-c4f85e5dd317") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.npumpr3F0X + rm /tmp/tmp.g6hHROJwVZ /tmp/tmp.npumpr3F0X + return 0 + log 'check backup and restore -- backup-azure-blob' + set +o xtrace [2025-08-06T20:34:11+0000] check backup and restore -- backup-azure-blob + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-azure-blob/' + /usr/bin/sed -e 's/name:/name: restore-backup-azure-blob/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.10JQrFQAPe ++ mktemp + local LAST_ERR=/tmp/tmp.pIrmjwfS0O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.10JQrFQAPe perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob created + cat /tmp/tmp.pIrmjwfS0O + rm /tmp/tmp.10JQrFQAPe /tmp/tmp.pIrmjwfS0O + return 0 + run_recovery_check backup-azure-blob + local backup_name=backup-azure-blob + local compare_suffix=_restore + local base=true + wait_restore backup-azure-blob some-name requested 0 3000 + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob to reach state "requested" .OK after 1 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + 
yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-13242", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.CQMAuGcNch ++ mktemp + local LAST_ERR=/tmp/tmp.wQtDitv6Nz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CQMAuGcNch + cat /tmp/tmp.wQtDitv6Nz + rm /tmp/tmp.CQMAuGcNch /tmp/tmp.wQtDitv6Nz + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + wait_restore backup-azure-blob some-name ready 0 1800 + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a1EVgsShps +++ mktemp ++ local LAST_ERR=/tmp/tmp.TV1dCi7S0I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a1EVgsShps ++ cat /tmp/tmp.TV1dCi7S0I ++ rm /tmp/tmp.a1EVgsShps /tmp/tmp.TV1dCi7S0I ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7u4uCObWCI +++ mktemp ++ local LAST_ERR=/tmp/tmp.xIQyhYcooU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7u4uCObWCI ++ cat /tmp/tmp.xIQyhYcooU ++ rm /tmp/tmp.7u4uCObWCI /tmp/tmp.xIQyhYcooU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qKDtA4BbMG +++ mktemp ++ local LAST_ERR=/tmp/tmp.D3XXppcT2L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qKDtA4BbMG ++ cat /tmp/tmp.D3XXppcT2L ++ rm /tmp/tmp.qKDtA4BbMG /tmp/tmp.D3XXppcT2L ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wvxZ63gVvB +++ mktemp ++ local LAST_ERR=/tmp/tmp.eEpITrCeMc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wvxZ63gVvB ++ cat /tmp/tmp.eEpITrCeMc ++ rm /tmp/tmp.wvxZ63gVvB /tmp/tmp.eEpITrCeMc ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4nBb8Hnoi7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kZIAnMSwfv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4nBb8Hnoi7 ++ cat /tmp/tmp.kZIAnMSwfv ++ rm /tmp/tmp.4nBb8Hnoi7 /tmp/tmp.kZIAnMSwfv ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ffC2ieHkUX +++ mktemp ++ local LAST_ERR=/tmp/tmp.mqkoV1gt4R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ffC2ieHkUX ++ cat /tmp/tmp.mqkoV1gt4R ++ rm /tmp/tmp.ffC2ieHkUX /tmp/tmp.mqkoV1gt4R ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iaEmzt7nV3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4coWRx557d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iaEmzt7nV3 ++ cat /tmp/tmp.4coWRx557d ++ rm /tmp/tmp.iaEmzt7nV3 /tmp/tmp.4coWRx557d ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5Ohh8GiAEv +++ mktemp ++ local LAST_ERR=/tmp/tmp.pThWFB6h3q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5Ohh8GiAEv ++ cat /tmp/tmp.pThWFB6h3q ++ rm /tmp/tmp.5Ohh8GiAEv /tmp/tmp.pThWFB6h3q ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kKPwWDtEYU +++ mktemp ++ local LAST_ERR=/tmp/tmp.J8lnvagaxN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kKPwWDtEYU ++ cat /tmp/tmp.J8lnvagaxN ++ rm /tmp/tmp.kKPwWDtEYU /tmp/tmp.J8lnvagaxN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
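
The compare_kubectl step shown above (and repeated for every restore) dumps the live StatefulSet and pipes it through a long yq filter so that run-specific fields never show up in the diff. Abridged to a few representative deletions, the helper has this shape; test_dir, tmp_dir, and namespace are stand-in variables, and the full filter is the one quoted verbatim in the log:

compare_kubectl() {
    local resource=$1 postfix=$2
    local expected="${test_dir}/compare/$(echo "$resource" | tr '/' '_')${postfix}.yml"
    local actual="${tmp_dir}/$(echo "$resource" | tr '/' '_').yml"

    # dump the live object, dropping volatile metadata and normalizing the namespace
    kubectl get -o yaml "$resource" \
        | yq eval '
            del(.metadata.managedFields) |
            del(.metadata.resourceVersion) |
            del(.. | select(has("uid")).uid) |
            del(.status) |
            (.. | select(tag == "!!str")) |= sub("'"$namespace"'", "NAME_SPACE")
          ' - >"$actual"

    # fields that only newer Kubernetes releases emit are stripped as well
    yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' "$actual"

    diff -u "$expected" "$actual"   # a non-empty diff aborts the test under set -e
}

Because diff -u exits non-zero on any difference and the suite runs with errexit, a silent diff is the pass signal; every comparison in this run is silent.
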
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GLbBWdjweJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.bYfomm90py ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GLbBWdjweJ ++ cat /tmp/tmp.bYfomm90py ++ rm /tmp/tmp.GLbBWdjweJ /tmp/tmp.bYfomm90py ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SlP0GgOmp1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ncbekd9bJ7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SlP0GgOmp1 ++ cat /tmp/tmp.ncbekd9bJ7 ++ rm /tmp/tmp.SlP0GgOmp1 /tmp/tmp.ncbekd9bJ7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fyudtSnHSO +++ mktemp ++ local LAST_ERR=/tmp/tmp.m8LHenkrO1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fyudtSnHSO ++ cat /tmp/tmp.m8LHenkrO1 ++ rm /tmp/tmp.fyudtSnHSO /tmp/tmp.m8LHenkrO1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SD05gXTrII +++ mktemp ++ local LAST_ERR=/tmp/tmp.oayWKzPyH9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SD05gXTrII ++ cat /tmp/tmp.oayWKzPyH9 ++ rm /tmp/tmp.SD05gXTrII /tmp/tmp.oayWKzPyH9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xt7E5dwWYX +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fuq91AhXw6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xt7E5dwWYX ++ cat /tmp/tmp.Fuq91AhXw6 ++ rm /tmp/tmp.xt7E5dwWYX /tmp/tmp.Fuq91AhXw6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . 
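
Once the cluster is ready, the data check goes through compare_mongo_cmd, which wraps run_mongo: the shell locates the psmdb-client pod, execs a mongo shell against the given URI, strips noisy or unstable output, and diffs against a stored expectation. A condensed sketch, with the egrep pattern abridged and tmp_dir/test_dir as stand-ins:

run_mongo() {
    local command=$1 uri=$2 driver=${3:-mongodb} suffix=.svc.cluster.local
    local client_pod
    client_pod=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client_pod" -- bash -c \
        "printf '$command\n' | mongo $driver://$uri$suffix/admin?ssl=false\&replicaSet=rs0"
}

compare_mongo_cmd() {
    local command=$1 uri=$2 postfix=$3
    run_mongo "use myApp\n db.test.${command}()" "$uri" mongodb \
        | egrep -v 'I NETWORK|Implicit session:|connecting to:|versions do not match' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        >"${tmp_dir}/${command}${postfix}"
    diff -u "${test_dir}/compare/${command}${postfix}.json" "${tmp_dir}/${command}${postfix}"
}

The sed expression drops ObjectId values and rewrites the numbered namespace suffix in host names to -xxx, so a single find.json fixture matches the output from all three replica set members.
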
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DE5EAuY1Al +++ mktemp ++ local LAST_ERR=/tmp/tmp.clr0Qerz33 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DE5EAuY1Al ++ cat /tmp/tmp.clr0Qerz33 ++ rm /tmp/tmp.DE5EAuY1Al /tmp/tmp.clr0Qerz33 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish. + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:40:31+0000] running db.test.find() in myApp + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb '' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HetzeqoqaS +++ mktemp ++ local LAST_ERR=/tmp/tmp.gVcTBiUDkR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HetzeqoqaS ++ cat /tmp/tmp.gVcTBiUDkR ++ rm /tmp/tmp.HetzeqoqaS /tmp/tmp.gVcTBiUDkR ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.dJULDrNAC1 ++ mktemp + local LAST_ERR=/tmp/tmp.82K9L7luHU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dJULDrNAC1 + cat /tmp/tmp.82K9L7luHU + rm /tmp/tmp.dJULDrNAC1 /tmp/tmp.82K9L7luHU + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:40:33+0000] running db.test.find() in myApp + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z9XTJHhb07 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hpgi6NrGgk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z9XTJHhb07 ++ cat /tmp/tmp.hpgi6NrGgk ++ rm /tmp/tmp.Z9XTJHhb07 /tmp/tmp.hpgi6NrGgk ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vNkjPvjiRN ++ mktemp + local LAST_ERR=/tmp/tmp.nKvCDiCsgW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vNkjPvjiRN + cat /tmp/tmp.nKvCDiCsgW + rm /tmp/tmp.vNkjPvjiRN /tmp/tmp.nKvCDiCsgW + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:40:35+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6f9bGaEfv9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vVj8K27CiK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6f9bGaEfv9 ++ cat /tmp/tmp.vVj8K27CiK ++ rm /tmp/tmp.6f9bGaEfv9 /tmp/tmp.vVj8K27CiK ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.VASA4uw3IP ++ mktemp + local LAST_ERR=/tmp/tmp.Py5t1DotWk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VASA4uw3IP + cat /tmp/tmp.Py5t1DotWk + rm /tmp/tmp.VASA4uw3IP /tmp/tmp.Py5t1DotWk + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + run_restore backup-minio-not-base + local backup_name=backup-minio-not-base + log 'drop collection' + set +o xtrace [2025-08-06T20:40:37+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fygu5EDPof +++ mktemp ++ local LAST_ERR=/tmp/tmp.5brWgZuJog ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fygu5EDPof ++ cat /tmp/tmp.5brWgZuJog ++ rm /tmp/tmp.Fygu5EDPof /tmp/tmp.5brWgZuJog ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kchueZIBLv ++ mktemp + local LAST_ERR=/tmp/tmp.RUbFrBtNXe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kchueZIBLv Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6337f03b-1929-459d-87be-6eaa098b1196") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.RUbFrBtNXe + rm /tmp/tmp.kchueZIBLv /tmp/tmp.RUbFrBtNXe + return 0 + log 'check backup and restore -- backup-minio-not-base' + set +o xtrace [2025-08-06T20:40:40+0000] check backup and restore -- backup-minio-not-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-not-base/' + kubectl_bin apply -f - + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-not-base/' ++ mktemp + local LAST_OUT=/tmp/tmp.jnCmFKfTAf ++ mktemp + local LAST_ERR=/tmp/tmp.T6BrAuck5K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jnCmFKfTAf perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-not-base created + cat /tmp/tmp.T6BrAuck5K + rm /tmp/tmp.jnCmFKfTAf /tmp/tmp.T6BrAuck5K + return 0 + run_recovery_check backup-minio-not-base '' false + local backup_name=backup-minio-not-base + local compare_suffix=_restore + local base=false + wait_restore backup-minio-not-base some-name requested 0 3000 + local backup_name=backup-minio-not-base + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base to reach state "requested" .OK after 1 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' 
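
The restore itself is kicked off by run_restore, and the full pattern is visible in the trace just above: drop the test collection so the restore has something to prove, then template conf/restore.yml with sed and apply it. A minimal sketch; log() and run_mongo are the helpers seen elsewhere in this log, and namespace and test_dir are stand-ins:

run_restore() {
    local backup_name=$1

    log 'drop collection'
    run_mongo 'use myApp\n db.test.drop()' \
        "myApp:myPass@some-name-rs0.${namespace}" mongodb+srv

    log "check backup and restore -- ${backup_name}"
    # fill in the name: and backupName: placeholders of the restore manifest
    yq "${test_dir}/conf/restore.yml" \
        | sed -e "s/name:/name: restore-${backup_name}/" \
        | sed -e "s/backupName:/backupName: ${backup_name}/" \
        | kubectl apply -f -
}

run_recovery_check then drives the requested/ready waits and the data comparison; for backup-minio-not-base it is called with base=false, which is why the subsequent finds are compared against find-not-base.json instead of find.json.
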
+ kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-13242", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.LMAUtLEejU ++ mktemp + local LAST_ERR=/tmp/tmp.4X1i2NQ0sY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LMAUtLEejU + cat /tmp/tmp.4X1i2NQ0sY + rm /tmp/tmp.LMAUtLEejU /tmp/tmp.4X1i2NQ0sY + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + wait_restore backup-minio-not-base some-name ready 0 1800 + local backup_name=backup-minio-not-base + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cjWGCXwqfd +++ mktemp ++ local LAST_ERR=/tmp/tmp.GKpKJpEdsu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cjWGCXwqfd ++ cat /tmp/tmp.GKpKJpEdsu ++ rm /tmp/tmp.cjWGCXwqfd /tmp/tmp.GKpKJpEdsu ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cqBmZ0QS0X +++ mktemp ++ local LAST_ERR=/tmp/tmp.3OeYxkCKBw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cqBmZ0QS0X ++ cat /tmp/tmp.3OeYxkCKBw ++ rm /tmp/tmp.cqBmZ0QS0X /tmp/tmp.3OeYxkCKBw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SfBHhO0TWJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.XsICkq9Xsn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SfBHhO0TWJ ++ cat /tmp/tmp.XsICkq9Xsn ++ rm /tmp/tmp.SfBHhO0TWJ /tmp/tmp.XsICkq9Xsn ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dz8p9j8Qjp +++ mktemp ++ local LAST_ERR=/tmp/tmp.hx7sYjvGZC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dz8p9j8Qjp ++ cat /tmp/tmp.hx7sYjvGZC ++ rm /tmp/tmp.dz8p9j8Qjp /tmp/tmp.hx7sYjvGZC ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.czyfnYhXdo +++ mktemp ++ local LAST_ERR=/tmp/tmp.aUC0bKPMh3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.czyfnYhXdo ++ cat /tmp/tmp.aUC0bKPMh3 ++ rm /tmp/tmp.czyfnYhXdo /tmp/tmp.aUC0bKPMh3 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0vx50uTBuD +++ mktemp ++ local LAST_ERR=/tmp/tmp.oNm10K37QK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0vx50uTBuD ++ cat /tmp/tmp.oNm10K37QK ++ rm /tmp/tmp.0vx50uTBuD /tmp/tmp.oNm10K37QK ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VYQYXExptC +++ mktemp ++ local LAST_ERR=/tmp/tmp.oWn1Y3rMTn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VYQYXExptC ++ cat /tmp/tmp.oWn1Y3rMTn ++ rm /tmp/tmp.VYQYXExptC /tmp/tmp.oWn1Y3rMTn ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N5xgbB6zLS +++ mktemp ++ local LAST_ERR=/tmp/tmp.GPGDKbDjJD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N5xgbB6zLS ++ cat /tmp/tmp.GPGDKbDjJD ++ rm /tmp/tmp.N5xgbB6zLS /tmp/tmp.GPGDKbDjJD ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2SVuIj1lf0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8HbHgXljMp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2SVuIj1lf0 ++ cat /tmp/tmp.8HbHgXljMp ++ rm /tmp/tmp.2SVuIj1lf0 /tmp/tmp.8HbHgXljMp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
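
wait_for_pbm_operations, which runs after each of these readiness waits, also disables xtrace, so only "waiting for PBM operation to finish" reaches the log. It presumably polls Percona Backup for MongoDB until no backup or restore is in flight; the following is a hypothetical sketch only, since neither the pbm invocation nor the container name is confirmed by this log:

wait_for_pbm_operations() {
    local cluster=$1
    echo -n 'waiting for PBM operation to finish'
    # assumption: query pbm status inside the backup-agent container of the first member
    until kubectl exec "${cluster}-rs0-0" -c backup-agent -- pbm status -o json 2>/dev/null \
            | jq -e '.running | length == 0' >/dev/null; do
        echo -n .
        sleep 5
    done
    echo
}
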
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NpfTenvS48 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1V0VkF9oS1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NpfTenvS48 ++ cat /tmp/tmp.1V0VkF9oS1 ++ rm /tmp/tmp.NpfTenvS48 /tmp/tmp.1V0VkF9oS1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YRFbtK3Nxt +++ mktemp ++ local LAST_ERR=/tmp/tmp.IUdBrP9G58 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YRFbtK3Nxt ++ cat /tmp/tmp.IUdBrP9G58 ++ rm /tmp/tmp.YRFbtK3Nxt /tmp/tmp.IUdBrP9G58 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.utZsp5jYJq +++ mktemp ++ local LAST_ERR=/tmp/tmp.iFjlFFM3zU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.utZsp5jYJq ++ cat /tmp/tmp.iFjlFFM3zU ++ rm /tmp/tmp.utZsp5jYJq /tmp/tmp.iFjlFFM3zU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TmMggoVeK7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sg2J8kQoyj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TmMggoVeK7 ++ cat /tmp/tmp.sg2J8kQoyj ++ rm /tmp/tmp.TmMggoVeK7 /tmp/tmp.sg2J8kQoyj ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ false == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:46:26+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wiJtqnMfw1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xwcsFjEYIc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wiJtqnMfw1 ++ cat /tmp/tmp.xwcsFjEYIc ++ rm /tmp/tmp.wiJtqnMfw1 /tmp/tmp.xwcsFjEYIc ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.4kUcAI1k5N ++ mktemp + local LAST_ERR=/tmp/tmp.ld6F2RU0h7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4kUcAI1k5N + cat /tmp/tmp.ld6F2RU0h7 + rm /tmp/tmp.4kUcAI1k5N /tmp/tmp.ld6F2RU0h7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.U3H0MN62p3/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:46:28+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uj5EgHijfT +++ mktemp ++ local LAST_ERR=/tmp/tmp.9nOaQjBN6d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Uj5EgHijfT ++ cat /tmp/tmp.9nOaQjBN6d ++ rm /tmp/tmp.Uj5EgHijfT /tmp/tmp.9nOaQjBN6d ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PBsmq7yn8e ++ mktemp + local LAST_ERR=/tmp/tmp.sHcQHzHoKa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PBsmq7yn8e + cat /tmp/tmp.sHcQHzHoKa + rm /tmp/tmp.PBsmq7yn8e /tmp/tmp.sHcQHzHoKa + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.U3H0MN62p3/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:46:29+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W8AjEAoYqh +++ mktemp ++ local LAST_ERR=/tmp/tmp.7mei6cIsST ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W8AjEAoYqh ++ cat /tmp/tmp.7mei6cIsST ++ rm /tmp/tmp.W8AjEAoYqh /tmp/tmp.7mei6cIsST ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yRE4XC9IU6 ++ mktemp + local LAST_ERR=/tmp/tmp.w8WcJv3oIa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yRE4XC9IU6 + cat /tmp/tmp.w8WcJv3oIa + rm /tmp/tmp.yRE4XC9IU6 /tmp/tmp.w8WcJv3oIa + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.U3H0MN62p3/find-not-base + run_restore backup-minio + local backup_name=backup-minio + log 'drop collection' + set +o xtrace [2025-08-06T20:46:31+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nE3aD7wkWw +++ mktemp ++ local LAST_ERR=/tmp/tmp.p2ZK1g1M4w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nE3aD7wkWw ++ cat /tmp/tmp.p2ZK1g1M4w ++ rm /tmp/tmp.nE3aD7wkWw /tmp/tmp.p2ZK1g1M4w ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.11kyE4kOK3 ++ mktemp + local LAST_ERR=/tmp/tmp.8saNMEVkOp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.11kyE4kOK3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("77b376c1-3722-4e11-b80d-6d36d1f3ae55") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.8saNMEVkOp + rm /tmp/tmp.11kyE4kOK3 /tmp/tmp.8saNMEVkOp + return 0 + log 'check backup and restore -- backup-minio' + set +o xtrace [2025-08-06T20:46:33+0000] check backup and restore -- backup-minio + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/backupName:/backupName: backup-minio/' + /usr/bin/sed -e 's/name:/name: restore-backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.VzUS2Ko3zv ++ mktemp + local LAST_ERR=/tmp/tmp.Vgj3IOhKQ3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VzUS2Ko3zv perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.Vgj3IOhKQ3 + rm /tmp/tmp.VzUS2Ko3zv /tmp/tmp.Vgj3IOhKQ3 + return 0 + run_recovery_check backup-minio + local backup_name=backup-minio + local compare_suffix=_restore + local base=true + wait_restore backup-minio some-name requested 0 3000 + local backup_name=backup-minio + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be createdOK Waiting psmdb-restore/restore-backup-minio to reach state "requested" .OK after 1 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' 
del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-13242", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.w8H9Bv7yuu ++ mktemp + local LAST_ERR=/tmp/tmp.GP9qlxiFhT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w8H9Bv7yuu + cat /tmp/tmp.GP9qlxiFhT + rm /tmp/tmp.w8H9Bv7yuu /tmp/tmp.GP9qlxiFhT + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + wait_restore backup-minio some-name ready 0 1800 + local backup_name=backup-minio + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be createdOK Waiting psmdb-restore/restore-backup-minio to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6BeEcFjmSA +++ mktemp ++ local LAST_ERR=/tmp/tmp.6AbBhxBQRw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6BeEcFjmSA ++ cat /tmp/tmp.6AbBhxBQRw ++ rm /tmp/tmp.6BeEcFjmSA /tmp/tmp.6AbBhxBQRw ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kXI1UvgHNc +++ mktemp ++ local LAST_ERR=/tmp/tmp.qAsnPsnZ1q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kXI1UvgHNc ++ cat /tmp/tmp.qAsnPsnZ1q ++ rm /tmp/tmp.kXI1UvgHNc /tmp/tmp.qAsnPsnZ1q ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yBVLVZPFZf +++ mktemp ++ local LAST_ERR=/tmp/tmp.EBzt5eBvft ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yBVLVZPFZf ++ cat /tmp/tmp.EBzt5eBvft ++ rm /tmp/tmp.yBVLVZPFZf /tmp/tmp.EBzt5eBvft ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s65oC2YFqy +++ mktemp ++ local LAST_ERR=/tmp/tmp.W9nhO44pDc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s65oC2YFqy ++ cat /tmp/tmp.W9nhO44pDc ++ rm /tmp/tmp.s65oC2YFqy /tmp/tmp.W9nhO44pDc ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LBYzr0r9RG +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZtUalGkzpF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LBYzr0r9RG ++ cat /tmp/tmp.ZtUalGkzpF ++ rm /tmp/tmp.LBYzr0r9RG /tmp/tmp.ZtUalGkzpF ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EvmpB2OqGR +++ mktemp ++ local LAST_ERR=/tmp/tmp.LRCOPLs2yR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EvmpB2OqGR ++ cat /tmp/tmp.LRCOPLs2yR ++ rm /tmp/tmp.EvmpB2OqGR /tmp/tmp.LRCOPLs2yR ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uro30Eso8s +++ mktemp ++ local LAST_ERR=/tmp/tmp.2aCsaOxsh9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uro30Eso8s ++ cat /tmp/tmp.2aCsaOxsh9 ++ rm /tmp/tmp.uro30Eso8s /tmp/tmp.2aCsaOxsh9 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.foXuE1oDR1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mv0DnBylPf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.foXuE1oDR1 ++ cat /tmp/tmp.Mv0DnBylPf ++ rm /tmp/tmp.foXuE1oDR1 /tmp/tmp.Mv0DnBylPf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mMl85vsxgS +++ mktemp ++ local LAST_ERR=/tmp/tmp.PLRffUXKzQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mMl85vsxgS ++ cat /tmp/tmp.PLRffUXKzQ ++ rm /tmp/tmp.mMl85vsxgS /tmp/tmp.PLRffUXKzQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2MjU9o9rQH +++ mktemp ++ local LAST_ERR=/tmp/tmp.3rzhwp0eH0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2MjU9o9rQH ++ cat /tmp/tmp.3rzhwp0eH0 ++ rm /tmp/tmp.2MjU9o9rQH /tmp/tmp.3rzhwp0eH0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ADIUs1ZodH +++ mktemp ++ local LAST_ERR=/tmp/tmp.2TBSbIkmog ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ADIUs1ZodH ++ cat /tmp/tmp.2TBSbIkmog ++ rm /tmp/tmp.ADIUs1ZodH /tmp/tmp.2TBSbIkmog ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sz2e9geOzS +++ mktemp ++ local LAST_ERR=/tmp/tmp.0NCUNi1iOB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sz2e9geOzS ++ cat /tmp/tmp.0NCUNi1iOB ++ rm /tmp/tmp.sz2e9geOzS /tmp/tmp.0NCUNi1iOB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SoHmu0RVYj +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZvQCTa8dQy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SoHmu0RVYj ++ cat /tmp/tmp.ZvQCTa8dQy ++ rm /tmp/tmp.SoHmu0RVYj /tmp/tmp.ZvQCTa8dQy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qEI7spshdg +++ mktemp ++ local LAST_ERR=/tmp/tmp.WjkxpvfTvO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qEI7spshdg ++ cat /tmp/tmp.WjkxpvfTvO ++ rm /tmp/tmp.qEI7spshdg /tmp/tmp.WjkxpvfTvO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . 
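The dotted output around this point is the wait_cluster_consistency pattern that recurs throughout the log: poll the custom resource's .status.state every 10 seconds, tolerating transient "error" and "initializing" readings, and give up after 32 tries. Stripped of the temp-file bookkeeping, the loop reduces to:

    # Poll the PerconaServerMongoDB CR until the operator reports "ready".
    retry=0
    until [[ "$(kubectl get psmdb some-name -o 'jsonpath={.status.state}')" == "ready" ]]; do
      retry=$((retry + 1))
      if [[ ${retry} -ge 32 ]]; then
        echo "cluster some-name never reached ready" >&2
        exit 1
      fi
      echo -n .
      sleep 10
    done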
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GNxB3SGMD3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rTvtbsTjh1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GNxB3SGMD3 ++ cat /tmp/tmp.rTvtbsTjh1 ++ rm /tmp/tmp.GNxB3SGMD3 /tmp/tmp.rTvtbsTjh1 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:52:35+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zIurhEiZuH +++ mktemp ++ local LAST_ERR=/tmp/tmp.EKJeZhAYlJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zIurhEiZuH ++ cat /tmp/tmp.EKJeZhAYlJ ++ rm /tmp/tmp.zIurhEiZuH /tmp/tmp.EKJeZhAYlJ ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QW0qS6GUTX ++ mktemp + local LAST_ERR=/tmp/tmp.b5ej37ROGr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QW0qS6GUTX + cat /tmp/tmp.b5ej37ROGr + rm /tmp/tmp.QW0qS6GUTX /tmp/tmp.b5ej37ROGr + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:52:37+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tnBy3CeDMM +++ mktemp ++ local LAST_ERR=/tmp/tmp.SkjSovxWnh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tnBy3CeDMM ++ cat /tmp/tmp.SkjSovxWnh ++ rm /tmp/tmp.tnBy3CeDMM /tmp/tmp.SkjSovxWnh ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RJx1ELyDLn ++ mktemp + local LAST_ERR=/tmp/tmp.18q6GYmZNl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RJx1ELyDLn + cat /tmp/tmp.18q6GYmZNl + rm /tmp/tmp.RJx1ELyDLn /tmp/tmp.18q6GYmZNl + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-06T20:52:39+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DWviBeXdYv +++ mktemp ++ local LAST_ERR=/tmp/tmp.awV5SnfpBv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DWviBeXdYv ++ cat /tmp/tmp.awV5SnfpBv ++ rm /tmp/tmp.DWviBeXdYv /tmp/tmp.awV5SnfpBv ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZR7ykl8f7q ++ mktemp + local LAST_ERR=/tmp/tmp.2t2CoJq4Vz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZR7ykl8f7q + cat /tmp/tmp.2t2CoJq4Vz + rm /tmp/tmp.ZR7ykl8f7q /tmp/tmp.2t2CoJq4Vz + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find + desc 'Testing with arbiter and non-voting nodes' + set +o xtrace ----------------------------------------------------------------------------------- Testing with arbiter and non-voting nodes ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1961-970eeaa4"' + yq eval '(.spec | select(has("backup"))).backup.image = 
"perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.LmBN9ntUqf ++ mktemp + local LAST_ERR=/tmp/tmp.btbzlzmTKU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LmBN9ntUqf perconaservermongodb.psmdb.percona.com/some-name configured + cat /tmp/tmp.btbzlzmTKU + rm /tmp/tmp.LmBN9ntUqf /tmp/tmp.btbzlzmTKU + return 0 + log 'check if all pods started' + set +o xtrace [2025-08-06T20:52:44+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0NoZDwkjYs +++ mktemp ++ local LAST_ERR=/tmp/tmp.8hNUYn2yQk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0NoZDwkjYs ++ cat /tmp/tmp.8hNUYn2yQk ++ rm /tmp/tmp.0NoZDwkjYs /tmp/tmp.8hNUYn2yQk ++ return 0 + [[ true == \t\r\u\e ]] + wait_pod some-name-rs0-arbiter-0 + local pod=some-name-rs0-arbiter-0 + set +o xtrace waiting for pod/some-name-rs0-arbiter-0 to be ready........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oenjcK7FKZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.PmnAmr0UUe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oenjcK7FKZ ++ cat /tmp/tmp.PmnAmr0UUe ++ rm /tmp/tmp.oenjcK7FKZ /tmp/tmp.PmnAmr0UUe ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iCKrYrGR6n +++ mktemp ++ local LAST_ERR=/tmp/tmp.WqPOmtpzEj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iCKrYrGR6n ++ cat /tmp/tmp.WqPOmtpzEj ++ rm /tmp/tmp.iCKrYrGR6n /tmp/tmp.WqPOmtpzEj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness... 
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xZemCovL43 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ANa7nE1RMD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xZemCovL43 ++ cat /tmp/tmp.ANa7nE1RMD ++ rm /tmp/tmp.xZemCovL43 /tmp/tmp.ANa7nE1RMD ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + log 'running backups' + set +o xtrace [2025-08-06T20:53:25+0000] running backups + backup_name_minio=backup-minio-arbiter-nv + run_backup minio backup-minio-arbiter-nv + local storage=minio + local backup_name=backup-minio-arbiter-nv + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/bin/sed -e 's/storageName:/storageName: minio/' + /usr/bin/sed -e 's/name:/name: backup-minio-arbiter-nv/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Xx6AskqYgJ ++ mktemp + local LAST_ERR=/tmp/tmp.k6xhg1vaW4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xx6AskqYgJ perconaservermongodbbackup.psmdb.percona.com/backup-minio-arbiter-nv created + cat /tmp/tmp.k6xhg1vaW4 + rm /tmp/tmp.Xx6AskqYgJ /tmp/tmp.k6xhg1vaW4 + return 0 + wait_backup backup-minio-arbiter-nv + local backup_name=backup-minio-arbiter-nv + local target_state=ready + set +o xtrace waiting for backup-minio-arbiter-nv to reach ready state...... 
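run_backup above assembles the backup object by patching a template: sed fills in the name and storageName, and because base=true the yq step forces .spec.type to incremental-base. The applied object is therefore equivalent to something like the following (the template itself isn't shown in the log, and clusterName is assumed from context):

    # Equivalent of the patched backup.yml applied above (sketch).
    kubectl apply -f - <<'EOF'
    apiVersion: psmdb.percona.com/v1
    kind: PerconaServerMongoDBBackup
    metadata:
      name: backup-minio-arbiter-nv
    spec:
      clusterName: some-name   # assumed; not visible in this log
      storageName: minio
      type: incremental-base   # set via yq '.spec.type="incremental-base"'
    EOF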
+ check_backup_in_storage backup-minio-arbiter-nv minio rs0 + local backup=backup-minio-arbiter-nv + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ /usr/bin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ get_backup_dest backup-minio-arbiter-nv ++ local backup_name=backup-minio-arbiter-nv ++ kubectl_bin get psmdb-backup backup-minio-arbiter-nv -o 'jsonpath={.status.destination}' ++ sed 's|s3://||' ++ sed -e 's/.json$//' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T4fEDq9s6I +++ mktemp ++ local LAST_ERR=/tmp/tmp.pG3ScxTjdO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup backup-minio-arbiter-nv -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T4fEDq9s6I ++ cat /tmp/tmp.pG3ScxTjdO ++ rm /tmp/tmp.T4fEDq9s6I /tmp/tmp.pG3ScxTjdO ++ return 0 + backup_dest=operator-testing/2025-08-06T20:53:27Z + [[ minio == \m\i\n\i\o ]] + grep filelist.pbm + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.dKOrH1163F ++ mktemp + local LAST_ERR=/tmp/tmp.t7e4gWXKM1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dKOrH1163F + cat /tmp/tmp.t7e4gWXKM1 If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.dKOrH1163F /tmp/tmp.t7e4gWXKM1 + return 0 + sleep 1 + let retry+=1 + '[' 7 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.jN1g0mqt04 ++ mktemp + local LAST_ERR=/tmp/tmp.maVwU5SJoM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jN1g0mqt04 + cat /tmp/tmp.maVwU5SJoM If you don't see a command prompt, try pressing enter. 
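check_backup_in_storage resolves the backup destination from .status.destination (operator-testing/2025-08-06T20:53:27Z here), then keeps listing the bucket from a throwaway aws-cli pod until filelist.pbm appears, retrying up to 60 times; the "If you don't see a command prompt" lines are ordinary kubectl run attach noise, not failures. Reduced to its essentials:

    # Poll minio until the incremental backup's filelist.pbm is visible.
    dest=$(kubectl get psmdb-backup backup-minio-arbiter-nv -o 'jsonpath={.status.destination}' | sed 's|s3://||')
    retry=0
    until kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
        /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
        AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 \
        s3 ls "s3://${dest}/rs0/filelist.pbm" | grep -q filelist.pbm; do
      retry=$((retry + 1))
      [[ ${retry} -ge 60 ]] && { echo "filelist.pbm never appeared" >&2; exit 1; }
      sleep 1
    done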
+ rm /tmp/tmp.jN1g0mqt04 /tmp/tmp.maVwU5SJoM + return 0 + sleep 1 + let retry+=1 + '[' 8 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.31nOrRujfb ++ mktemp + local LAST_ERR=/tmp/tmp.NqMuJvcPFx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.31nOrRujfb + cat /tmp/tmp.NqMuJvcPFx If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.31nOrRujfb /tmp/tmp.NqMuJvcPFx + return 0 + sleep 1 + let retry+=1 + '[' 9 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.0vqNkVRHMW ++ mktemp + local LAST_ERR=/tmp/tmp.oOWsRanACo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-06T20:53:27Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0vqNkVRHMW + cat /tmp/tmp.oOWsRanACo + rm /tmp/tmp.0vqNkVRHMW /tmp/tmp.oOWsRanACo + return 0 2025-08-06 20:53:33 13932 filelist.pbm + run_restore backup-minio-arbiter-nv + local backup_name=backup-minio-arbiter-nv + log 'drop collection' + set +o xtrace [2025-08-06T20:54:01+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-13242 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FRnsVUW3vW +++ mktemp ++ local LAST_ERR=/tmp/tmp.MtaIqYw3Jq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FRnsVUW3vW ++ cat /tmp/tmp.MtaIqYw3Jq ++ rm /tmp/tmp.FRnsVUW3vW /tmp/tmp.MtaIqYw3Jq ++ return 0 + local client_container=psmdb-client-b9788d8bc-lqtzr + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-13242 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.R9I7xM7erI ++ mktemp + local LAST_ERR=/tmp/tmp.krAL5D8RYS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R9I7xM7erI Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-3.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-arbiter-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017,some-name-rs0-nv-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a07412a9-299d-4224-b211-f8a1447397ff") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.krAL5D8RYS + rm /tmp/tmp.R9I7xM7erI /tmp/tmp.krAL5D8RYS + return 0 + log 'check backup and restore -- backup-minio-arbiter-nv' + set +o xtrace [2025-08-06T20:54:03+0000] check backup and restore -- backup-minio-arbiter-nv + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/bin/sed -e 's/name:/name: restore-backup-minio-arbiter-nv/' + /usr/bin/sed -e 's/backupName:/backupName: backup-minio-arbiter-nv/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.eT7uwt11uX ++ mktemp + local LAST_ERR=/tmp/tmp.AJqggeg9JN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eT7uwt11uX perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-arbiter-nv created + cat /tmp/tmp.AJqggeg9JN + rm /tmp/tmp.eT7uwt11uX /tmp/tmp.AJqggeg9JN + return 0 + run_recovery_check backup-minio-arbiter-nv _restore-arbiter-nv + local backup_name=backup-minio-arbiter-nv + local compare_suffix=_restore-arbiter-nv + local base=true + wait_restore backup-minio-arbiter-nv some-name requested 0 3000 + local backup_name=backup-minio-arbiter-nv + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-arbiter-nv object to be createdOK Waiting psmdb-restore/restore-backup-minio-arbiter-nv to reach state "requested" ...OK after 3 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore-arbiter-nv + local resource=statefulset/some-name-rs0 + local postfix=_restore-arbiter-nv + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml + local new_result=/tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + '[' -n '' 
-a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-13242", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.4Gs4I1SNCh ++ mktemp + local LAST_ERR=/tmp/tmp.nf3NkteCnY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4Gs4I1SNCh + cat /tmp/tmp.nf3NkteCnY + rm /tmp/tmp.4Gs4I1SNCh /tmp/tmp.nf3NkteCnY + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml /tmp/tmp.U3H0MN62p3/statefulset_some-name-rs0.yml + wait_restore backup-minio-arbiter-nv some-name ready 0 1800 + local backup_name=backup-minio-arbiter-nv + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-arbiter-nv object to be createdOK Waiting psmdb-restore/restore-backup-minio-arbiter-nv to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iM0bMa6mj0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rI0DAwqhPi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iM0bMa6mj0 ++ cat /tmp/tmp.rI0DAwqhPi ++ rm /tmp/tmp.iM0bMa6mj0 /tmp/tmp.rI0DAwqhPi ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0vR0oMP7DP +++ mktemp ++ local LAST_ERR=/tmp/tmp.qhMo6s0zuW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0vR0oMP7DP ++ cat /tmp/tmp.qhMo6s0zuW ++ rm /tmp/tmp.0vR0oMP7DP /tmp/tmp.qhMo6s0zuW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QDJbLi9qUe +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cu108IG920 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QDJbLi9qUe ++ cat /tmp/tmp.Cu108IG920 ++ rm /tmp/tmp.QDJbLi9qUe /tmp/tmp.Cu108IG920 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HlK6ohoqsL +++ mktemp ++ local LAST_ERR=/tmp/tmp.cktzETxDTR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HlK6ohoqsL ++ cat /tmp/tmp.cktzETxDTR ++ rm /tmp/tmp.HlK6ohoqsL /tmp/tmp.cktzETxDTR ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ujn7P5XxWu +++ mktemp ++ local LAST_ERR=/tmp/tmp.owijfLlurT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ujn7P5XxWu ++ cat /tmp/tmp.owijfLlurT ++ rm /tmp/tmp.ujn7P5XxWu /tmp/tmp.owijfLlurT ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E65ZZnrwGU +++ mktemp ++ local LAST_ERR=/tmp/tmp.wONg0MQU9E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E65ZZnrwGU ++ cat /tmp/tmp.wONg0MQU9E ++ rm /tmp/tmp.E65ZZnrwGU /tmp/tmp.wONg0MQU9E ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vQx4s61lh0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XQFmj2fp3i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vQx4s61lh0 ++ cat /tmp/tmp.XQFmj2fp3i ++ rm /tmp/tmp.vQx4s61lh0 /tmp/tmp.XQFmj2fp3i ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fLCy5USdSD +++ mktemp ++ local LAST_ERR=/tmp/tmp.KCsBImEkhm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fLCy5USdSD ++ cat /tmp/tmp.KCsBImEkhm ++ rm /tmp/tmp.fLCy5USdSD /tmp/tmp.KCsBImEkhm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lWCoBTd6tg +++ mktemp ++ local LAST_ERR=/tmp/tmp.rYzHzspph2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lWCoBTd6tg ++ cat /tmp/tmp.rYzHzspph2 ++ rm /tmp/tmp.lWCoBTd6tg /tmp/tmp.rYzHzspph2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TcYIUwMtM9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RGbXMZzapi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TcYIUwMtM9 ++ cat /tmp/tmp.RGbXMZzapi ++ rm /tmp/tmp.TcYIUwMtM9 /tmp/tmp.RGbXMZzapi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ttzJklofVX +++ mktemp ++ local LAST_ERR=/tmp/tmp.URPVs1e2pQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ttzJklofVX ++ cat /tmp/tmp.URPVs1e2pQ ++ rm /tmp/tmp.ttzJklofVX /tmp/tmp.URPVs1e2pQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CZwxflEuYY +++ mktemp ++ local LAST_ERR=/tmp/tmp.JUdAQPsTIV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CZwxflEuYY ++ cat /tmp/tmp.JUdAQPsTIV ++ rm /tmp/tmp.CZwxflEuYY /tmp/tmp.JUdAQPsTIV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DxxS4LVQQ5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BAeIv7Hsvu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DxxS4LVQQ5 ++ cat /tmp/tmp.BAeIv7Hsvu ++ rm /tmp/tmp.DxxS4LVQQ5 /tmp/tmp.BAeIv7Hsvu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RNH3ba7ZOJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.nsRakAjtOx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RNH3ba7ZOJ ++ cat /tmp/tmp.nsRakAjtOx ++ rm /tmp/tmp.RNH3ba7ZOJ /tmp/tmp.nsRakAjtOx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5yR37hbnLh +++ mktemp ++ local LAST_ERR=/tmp/tmp.LgG8W865yH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5yR37hbnLh ++ cat /tmp/tmp.LgG8W865yH ++ rm /tmp/tmp.5yR37hbnLh /tmp/tmp.LgG8W865yH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
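Once the state above finally flips to ready, the log repeats the same data check three more times, one per rs0 member. Every compare_mongo_cmd in this file is the same pipeline: run db.test.find() through the psmdb-client pod, filter out shell noise, scrub ObjectIds and pod ordinals so the output is stable, and diff against the stored find.json. In outline (host shown for rs0-0; the -1 and -2 runs differ only in the URI):

    # Query via the client pod, normalize, and compare with the expected fixture.
    client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client" -- bash -c \
      'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false&replicaSet=rs0"' \
      | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
      | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
      >/tmp/find
    diff -u compare/find.json /tmp/find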
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qpChENtYuv +++ mktemp ++ local LAST_ERR=/tmp/tmp.oThlQLanjr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qpChENtYuv ++ cat /tmp/tmp.oThlQLanjr ++ rm /tmp/tmp.qpChENtYuv /tmp/tmp.oThlQLanjr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DeA01SNM3q +++ mktemp ++ local LAST_ERR=/tmp/tmp.KdxNvJ0N4Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DeA01SNM3q ++ cat /tmp/tmp.KdxNvJ0N4Z ++ rm /tmp/tmp.DeA01SNM3q /tmp/tmp.KdxNvJ0N4Z ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3FbSEQCFdp +++ mktemp ++ local LAST_ERR=/tmp/tmp.G1BT88uV52 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3FbSEQCFdp ++ cat /tmp/tmp.G1BT88uV52 ++ rm /tmp/tmp.3FbSEQCFdp /tmp/tmp.G1BT88uV52 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
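The wait above is a fixed-budget poll: the harness re-reads .status.state every 10 seconds and aborts once the retry counter reaches 32 (roughly five minutes). A minimal sketch of that pattern, assuming a bare kubectl call; the function name and failure message are placeholders, not the harness's actual code (the real loop routes kubectl through its kubectl_bin wrapper):

    # Minimal sketch of the readiness poll seen above; names are assumptions.
    wait_cluster_state() {
        local cluster=$1
        local retry=0
        until [[ $(kubectl get psmdb "${cluster}" -o 'jsonpath={.status.state}') == "ready" ]]; do
            let retry+=1
            if [ "${retry}" -ge 32 ]; then
                echo "psmdb/${cluster} never reached ready" >&2
                return 1
            fi
            echo -n .
            sleep 10
        done
    }

In this run the state moves from error to initializing around retry 7 and reaches ready shortly after retry 16, well inside the budget.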
+ compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242
+ local command=find
+ local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242
+ local postfix=
+ local suffix=
+ local database=myApp
+ local collection=test
+ local sort=
+ local 'full_command=db.test.find()'
+ [[ ! -z '' ]]
+ log 'running db.test.find() in myApp'
+ set +o xtrace
[2025-08-06T21:02:02+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 mongodb ''
+ local 'command=use myApp\n db.test.find()'
+ local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ local client_container=psmdb-client-b9788d8bc-lqtzr
+ local mongo_flag=
+ [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
++ mktemp
+ local LAST_OUT=/tmp/tmp.gez9JelCQ6
++ mktemp
+ local LAST_ERR=/tmp/tmp.LGRbo0c9PR
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.gez9JelCQ6
+ cat /tmp/tmp.LGRbo0c9PR
+ rm /tmp/tmp.gez9JelCQ6 /tmp/tmp.LGRbo0c9PR
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find
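Every kubectl invocation in this log goes through a kubectl_bin wrapper: stdout and stderr are captured into mktemp files, the call is retried up to three times with a growing back-off, the captured streams are replayed into the log, and the temp files are removed. A reconstruction inferred from the trace above; the harness's real function may differ in detail:

    # Sketch of the kubectl_bin retry wrapper as inferred from the trace;
    # reconstructed from the log, not copied from the harness source.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            set -e
            if [ "${exit_status}" != 0 ]; then
                cat "${LAST_OUT}"
                cat "${LAST_ERR}"
                sleep $((timeout * i))   # observed back-off: 0s, 4s, 8s
            else
                break
            fi
        done
        cat "${LAST_OUT}"
        cat "${LAST_ERR}"
        rm "${LAST_OUT}" "${LAST_ERR}"
        return "${exit_status}"
    }

The 0s/4s/8s sleeps visible later in the cert-manager teardown match the timeout * i back-off assumed here.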
+ compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242
+ local command=find
+ local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242
+ local database=myApp
+ local collection=test
+ local 'full_command=db.test.find()'
+ [[ ! -z '' ]]
+ log 'running db.test.find() in myApp'
+ set +o xtrace
[2025-08-06T21:02:04+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 mongodb ''
+ egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ local client_container=psmdb-client-b9788d8bc-lqtzr
+ [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find
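Each compare_mongo_cmd call follows the same recipe: locate the psmdb-client pod, pipe the query into mongo inside it, strip noisy log lines and volatile tokens (ObjectIds, per-pod DNS ordinals), and diff the normalized output against a golden file; an empty diff means the restored data on that member matches the expectation. A condensed sketch under those assumptions, with argument handling simplified relative to the real helper:

    # Condensed sketch of compare_mongo_cmd/run_mongo; helper names match the
    # trace, internals are simplified assumptions.
    compare_mongo_cmd() {
        local command=$1    # e.g. find
        local uri=$2        # user:pass@host prefix, without .svc.cluster.local
        local expected=$3   # golden file such as .../compare/find.json
        local pod
        pod=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "${pod}" -- bash -c \
            "printf 'use myApp\n db.test.${command}()\n' | mongo \"mongodb://${uri}.svc.cluster.local/admin?ssl=false&replicaSet=rs0\"" \
            | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Implicit session:|versions do not match' \
            | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
            >/tmp/find
        diff -u "${expected}" /tmp/find
    }

The test runs this once per replica (rs0-0, rs0-1, rs0-2) to confirm all three members converged on the restored data set.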
+ compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242
+ local command=find
+ local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242
+ local database=myApp
+ local collection=test
+ local 'full_command=db.test.find()'
+ [[ ! -z '' ]]
+ log 'running db.test.find() in myApp'
+ set +o xtrace
[2025-08-06T21:02:06+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 mongodb ''
+ egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:'
+ /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ local client_container=psmdb-client-b9788d8bc-lqtzr
+ [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-b9788d8bc-lqtzr -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-13242.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.U3H0MN62p3/find
+ destroy demand-backup-incremental-13242
+ local namespace=demand-backup-incremental-13242
+ local ignore_logs=true
+ [[ 0 == 1 ]]
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
++ kubectl get psmdb-backup --no-headers
++ return 0
+ '[' 6 '!=' 0 ']'
+ kubectl_bin get psmdb-backup
+ kubectl get psmdb-backup
NAME                      CLUSTER     STORAGE      DESTINATION                                                                      TYPE               SIZE        STATUS   COMPLETED   AGE
backup-aws-s3             some-name   aws-s3       s3://operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:19:54Z       incremental-base   644.35KB    ready    41m         42m
backup-azure-blob         some-name   azure-blob   azure://operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:38Z    incremental-base   1.50MB      ready    41m         42m
backup-gcp-cs             some-name   gcp-cs       s3://operator-testing/psmdb-demand-backup-incremental/2025-08-06T20:20:16Z       incremental-base   1012.26KB   ready    41m         42m
backup-minio              some-name   minio        s3://operator-testing/2025-08-06T20:21:02Z                                       incremental-base   1.23MB      ready    40m         41m
backup-minio-arbiter-nv   some-name   minio        s3://operator-testing/2025-08-06T20:53:27Z                                       incremental-base   898.36KB    ready    8m35s       8m42s
backup-minio-not-base     some-name   minio        s3://operator-testing/2025-08-06T20:21:33Z                                       incremental        702.88KB    ready    40m         40m
+ return 0
+ kubectl_bin delete psmdb-backup --all
+ kubectl delete psmdb-backup --all
perconaservermongodbbackup.psmdb.percona.com "backup-aws-s3" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-azure-blob" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-gcp-cs" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-minio-arbiter-nv" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-minio-not-base" deleted
+ return 0
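The order matters here: the psmdb-backup objects are removed while their CRD and controller still exist, so any finalizers on them (the finalizer-stripping patches below show these CRs can carry them) get a chance to run before the CRDs themselves go away. Reduced to the two grounded commands from this trace:

    # Teardown order used by the harness: backup CRs first, CRDs second.
    kubectl delete psmdb-backup --all
    kubectl delete -f "${src_dir}/deploy/crd.yaml" --ignore-not-found --wait=false

Deleting the CRD first would instead strand the backup objects mid-finalization, which is exactly the situation the patch loop below exists to clean up.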
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-incremental-13242 some-name --type=merge -p '{"metadata":{"finalizers":[]}}'
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name" not found
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met
+ return 0
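The bogus kubectl patch ... -n sh calls above are an artifact of the pipeline, not a real namespace: when the kubectl get finds no rows (its "doesn't have a resource type" message goes to stderr, so stdin is empty), GNU xargs still runs sh -xc '...' once with no arguments, $0 inside the script defaults to the shell name "sh", and $1 expands to nothing. The resulting patch fails and the harness swallows it with + :. A defensive variant (a suggestion, not the harness's code) would pass --no-run-if-empty:

    # Hedged fix sketch: -r/--no-run-if-empty stops xargs from running the
    # patch once with empty arguments when no custom resources remain.
    kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'

With -r the empty-input case produces no patch call at all, and the + : fallback is only needed for genuine patch failures.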
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.hCIfcoVGBB
++ mktemp
+ local LAST_ERR=/tmp/tmp.O4fT6ONjTq
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.hCIfcoVGBB
+ cat /tmp/tmp.O4fT6ONjTq
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 0
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.hCIfcoVGBB
+ cat /tmp/tmp.O4fT6ONjTq
+ sleep 4
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.hCIfcoVGBB
+ cat /tmp/tmp.O4fT6ONjTq
+ sleep 8
+ cat /tmp/tmp.hCIfcoVGBB
+ cat /tmp/tmp.O4fT6ONjTq
+ rm /tmp/tmp.hCIfcoVGBB /tmp/tmp.O4fT6ONjTq
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ rm -rf /tmp/tmp.U3H0MN62p3
+ kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-incremental-13242
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.3zOfdMfkgs
+ local LAST_OUT=/tmp/tmp.sObQq6uEx2
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.5fY3yZStWL
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.2iCjpG0vBH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace demand-backup-incremental-13242
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0
+ kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-incremental-13242
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.3zOfdMfkgs
+ local LAST_OUT=/tmp/tmp.sObQq6uEx2
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.5fY3yZStWL
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.2iCjpG0vBH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace demand-backup-incremental-13242
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
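The two namespace deletions above run through the same retry wrapper in parallel, which is why their traces interleave. A rough sketch of that wrapper's shape, inferred from the trace (mktemp for LAST_OUT/LAST_ERR, three attempts via seq 0 2, captured output printed and cleaned up afterwards); the name retry_kubectl is illustrative, not the harness's real kubectl_bin:

# Sketch, assuming the retry shape visible in the trace above.
retry_kubectl() {
    local out err status
    out=$(mktemp); err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$out" 2>"$err"   # capture stdout/stderr per attempt
        status=$?
        set -e
        if [ "$status" -eq 0 ]; then
            break
        fi
        sleep 4                         # matches timeout=4 in the trace
    done
    cat "$out" "$err"                   # surface whatever the last try produced
    rm -f "$out" "$err"
    return "$status"
}

# Force-delete the leftover namespaces in parallel, as the harness does:
retry_kubectl delete --grace-period=0 --force=true namespace demand-backup-incremental-13242 &
retry_kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
wait

Forcing with --grace-period=0 --force skips graceful pod termination, which is acceptable here because the namespaces are test leftovers being destroyed wholesale before the next run recreates them.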