Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/logs/demand-backup-incremental-sharded.log grep: warning: stray \ before - WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + create_infra demand-backup-incremental-sharded-20877 + local ns=demand-backup-incremental-sharded-20877 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.EPa2jqzWzG ++ mktemp + local LAST_ERR=/tmp/tmp.aHrf0X4Mot + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EPa2jqzWzG customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.aHrf0X4Mot + rm /tmp/tmp.EPa2jqzWzG /tmp/tmp.aHrf0X4Mot + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19771 backup-aws-s3-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-aws-s3-sharded" not found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19771 backup-azure-blob-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19771 backup-gcp-cs-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-gcp-cs-sharded" not found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19771 backup-minio-not-base-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-minio-not-base-sharded" not found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19771 backup-minio-sharded 
--type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.7GhZbyqgop ++ mktemp + local LAST_ERR=/tmp/tmp.mIDiOORuVW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7GhZbyqgop + cat /tmp/tmp.mIDiOORuVW + rm /tmp/tmp.7GhZbyqgop /tmp/tmp.mIDiOORuVW + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XSODy89Dng ++ mktemp + local LAST_ERR=/tmp/tmp.vBdPoVOQXu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XSODy89Dng + cat /tmp/tmp.vBdPoVOQXu + rm /tmp/tmp.XSODy89Dng /tmp/tmp.vBdPoVOQXu + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.IYnuLQt9TT ++ mktemp + local LAST_ERR=/tmp/tmp.H03irdn5Ps + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IYnuLQt9TT + cat /tmp/tmp.H03irdn5Ps + rm /tmp/tmp.IYnuLQt9TT /tmp/tmp.H03irdn5Ps + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.WdEnhSmN63 ++ mktemp + local LAST_ERR=/tmp/tmp.ZbsJEqyeCj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WdEnhSmN63 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io 
"service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.ZbsJEqyeCj + rm /tmp/tmp.WdEnhSmN63 /tmp/tmp.ZbsJEqyeCj + return 0 + check_crd_for_deletion PR-2009-645e0543 + local git_tag=PR-2009-645e0543 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2009-645e0543/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EDe44LIoNA +++ mktemp ++ local LAST_ERR=/tmp/tmp.DfROAWxKi7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.EDe44LIoNA ++ cat /tmp/tmp.DfROAWxKi7 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.EDe44LIoNA ++ cat /tmp/tmp.DfROAWxKi7 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.EDe44LIoNA ++ cat /tmp/tmp.DfROAWxKi7 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.EDe44LIoNA ++ cat /tmp/tmp.DfROAWxKi7 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.EDe44LIoNA /tmp/tmp.DfROAWxKi7 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.o5405OqDrD egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.3vubZalr5T ++ mktemp + local LAST_ERR=/tmp/tmp.0DLCbg8Snj + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.VoHbNKaGuK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o5405OqDrD + cat /tmp/tmp.0DLCbg8Snj + rm /tmp/tmp.o5405OqDrD /tmp/tmp.0DLCbg8Snj + return 0 namespace "demand-backup-incremental-sharded-19771" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3vubZalr5T namespace "psmdb-operator" deleted + cat /tmp/tmp.VoHbNKaGuK + rm /tmp/tmp.3vubZalr5T /tmp/tmp.VoHbNKaGuK + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3Y1188or0i ++ mktemp + local LAST_ERR=/tmp/tmp.lhv0IjtpeG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3Y1188or0i + cat /tmp/tmp.lhv0IjtpeG + rm /tmp/tmp.3Y1188or0i /tmp/tmp.lhv0IjtpeG + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.lCIktNRqQ7 ++ mktemp + local LAST_ERR=/tmp/tmp.WKY4PIg4Yr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lCIktNRqQ7 namespace/psmdb-operator created + cat /tmp/tmp.WKY4PIg4Yr + rm /tmp/tmp.lCIktNRqQ7 /tmp/tmp.WKY4PIg4Yr + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.6Q1AmMchPT +++ mktemp ++ local LAST_ERR=/tmp/tmp.4W54e5bXVj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6Q1AmMchPT ++ cat /tmp/tmp.4W54e5bXVj ++ rm 
/tmp/tmp.6Q1AmMchPT /tmp/tmp.4W54e5bXVj ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-2-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3mzXevvsCi ++ mktemp + local LAST_ERR=/tmp/tmp.EDfbqGeqCy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-2-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3mzXevvsCi Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-2-cluster1" modified. + cat /tmp/tmp.EDfbqGeqCy + rm /tmp/tmp.3mzXevvsCi /tmp/tmp.EDfbqGeqCy + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.tZEy2f6YVH ++ mktemp + local LAST_ERR=/tmp/tmp.sW70ob13Pn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tZEy2f6YVH customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.sW70ob13Pn + rm /tmp/tmp.tZEy2f6YVH /tmp/tmp.sW70ob13Pn + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Yt6SbVo4mu ++ mktemp + local LAST_ERR=/tmp/tmp.H5DGF5QLSU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Yt6SbVo4mu clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.H5DGF5QLSU + rm /tmp/tmp.Yt6SbVo4mu /tmp/tmp.H5DGF5QLSU + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2009-645e0543") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.MANucFWRVq ++ mktemp + local LAST_ERR=/tmp/tmp.gzRP7oPQO1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MANucFWRVq deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.gzRP7oPQO1 + rm /tmp/tmp.MANucFWRVq /tmp/tmp.gzRP7oPQO1 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.wRR6EcV8YI +++ mktemp ++ local LAST_ERR=/tmp/tmp.ki4aSqz5zp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wRR6EcV8YI ++ cat /tmp/tmp.ki4aSqz5zp ++ rm /tmp/tmp.wRR6EcV8YI /tmp/tmp.ki4aSqz5zp ++ return 0 + wait_pod percona-server-mongodb-operator-677cf67bbc-5qsfw + local pod=percona-server-mongodb-operator-677cf67bbc-5qsfw + set +o xtrace waiting for pod/percona-server-mongodb-operator-677cf67bbc-5qsfw to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hn1Xb1ySYC +++ mktemp ++ local LAST_ERR=/tmp/tmp.tCNDcJKFX1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Hn1Xb1ySYC ++ cat /tmp/tmp.tCNDcJKFX1 ++ rm /tmp/tmp.Hn1Xb1ySYC /tmp/tmp.tCNDcJKFX1 ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-677cf67bbc-5qsfw ++ mktemp + local LAST_OUT=/tmp/tmp.JJWQPQSJz2 ++ mktemp + local LAST_ERR=/tmp/tmp.sv88bZ62Oa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-677cf67bbc-5qsfw + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JJWQPQSJz2 + cat /tmp/tmp.sv88bZ62Oa + rm /tmp/tmp.JJWQPQSJz2 /tmp/tmp.sv88bZ62Oa + return 0 2025-08-07T08:25:47.397Z INFO setup Manager starting up {"gitCommit": "645e054370b1c94a71b245f25e4838f59389aa71", "gitBranch": "PR-2009-645e0543", "buildTime": "", "goVersion": "go1.24.6", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-incremental-sharded-20877 + local namespace=demand-backup-incremental-sharded-20877 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns ++ mktemp + desc 'cleaned up old namespaces demand-backup-incremental-sharded-20877' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-incremental-sharded-20877 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-incremental-sharded-20877 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.iErtD4OeNf egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.GCqH7D8rjo + local LAST_ERR=/tmp/tmp.6V4TNbOjvi + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.hPJlglxfcb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-incremental-sharded-20877 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iErtD4OeNf + cat /tmp/tmp.6V4TNbOjvi + rm /tmp/tmp.iErtD4OeNf /tmp/tmp.6V4TNbOjvi + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GCqH7D8rjo + cat /tmp/tmp.hPJlglxfcb + rm /tmp/tmp.GCqH7D8rjo /tmp/tmp.hPJlglxfcb + return 0 + kubectl_bin wait --for=delete namespace demand-backup-incremental-sharded-20877 ++ mktemp + local LAST_OUT=/tmp/tmp.RuP1RsB9Pu ++ mktemp + local LAST_ERR=/tmp/tmp.cpLyjbMg9o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-incremental-sharded-20877 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RuP1RsB9Pu + cat /tmp/tmp.cpLyjbMg9o + rm /tmp/tmp.RuP1RsB9Pu /tmp/tmp.cpLyjbMg9o + return 0 + desc 'create namespace demand-backup-incremental-sharded-20877' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-incremental-sharded-20877 
----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-incremental-sharded-20877 ++ mktemp + local LAST_OUT=/tmp/tmp.XBI4UdbHvM ++ mktemp + local LAST_ERR=/tmp/tmp.IM9CF9sUB0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-incremental-sharded-20877 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XBI4UdbHvM namespace/demand-backup-incremental-sharded-20877 created + cat /tmp/tmp.IM9CF9sUB0 + rm /tmp/tmp.XBI4UdbHvM /tmp/tmp.IM9CF9sUB0 + return 0 + set_kube_ctx demand-backup-incremental-sharded-20877 + local namespace=demand-backup-incremental-sharded-20877 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.uI85JrIHCn +++ mktemp ++ local LAST_ERR=/tmp/tmp.gieBp9Mlrl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uI85JrIHCn ++ cat /tmp/tmp.gieBp9Mlrl ++ rm /tmp/tmp.uI85JrIHCn /tmp/tmp.gieBp9Mlrl ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-2-cluster1 --namespace=demand-backup-incremental-sharded-20877 ++ mktemp + local LAST_OUT=/tmp/tmp.xIIUVAiIXj ++ mktemp + local LAST_ERR=/tmp/tmp.0suzSIkady + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-2-cluster1 --namespace=demand-backup-incremental-sharded-20877 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xIIUVAiIXj Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-2-cluster1" modified. 
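Almost every kubectl call in this trace goes through the suite's kubectl_bin wrapper: two mktemp files for stdout and stderr, up to three attempts with a growing sleep between them, then the temp files are printed and removed. The wrapper's real definition lives in the test suite's functions file and is not printed here; a rough reconstruction of the pattern, inferred only from the trace itself, looks like this:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        # stop retrying as soon as kubectl succeeds
        [ "$exit_status" -eq 0 ] && break
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        # back off before the next attempt: 0s, 4s, 8s as seen in the trace
        sleep $((timeout * i))
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}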
+ cat /tmp/tmp.0suzSIkady + rm /tmp/tmp.xIIUVAiIXj /tmp/tmp.0suzSIkady + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu Aug 7 08:26:06 2025 NAMESPACE: demand-backup-incremental-sharded-20877 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-incremental-sharded-20877.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-incremental-sharded-20877 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-incremental-sharded-20877 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-incremental-sharded-20877 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-incremental-sharded-20877 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lNMfWvwXNE +++ mktemp ++ local LAST_ERR=/tmp/tmp.rvPYAXAr6g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lNMfWvwXNE ++ cat /tmp/tmp.rvPYAXAr6g ++ rm /tmp/tmp.lNMfWvwXNE /tmp/tmp.rvPYAXAr6g ++ return 0 + MINIO_POD=minio-service-86dfccd949-9ld68 + wait_pod minio-service-86dfccd949-9ld68 + local pod=minio-service-86dfccd949-9ld68 + set +o xtrace waiting for pod/minio-service-86dfccd949-9ld68 to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-sharded-20877.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.SYOisH3P1s ++ mktemp + local LAST_ERR=/tmp/tmp.hIDEzjMetA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-sharded-20877.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SYOisH3P1s service/minio-service created + cat /tmp/tmp.hIDEzjMetA + rm /tmp/tmp.SYOisH3P1s /tmp/tmp.hIDEzjMetA + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.cVOJSuIw3p ++ mktemp + local LAST_ERR=/tmp/tmp.49H6KwK6yb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cVOJSuIw3p make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.49H6KwK6yb If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-incremental-sharded-20877 + rm /tmp/tmp.cVOJSuIw3p /tmp/tmp.49H6KwK6yb + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.6rEpbuIlgk ++ mktemp + local LAST_ERR=/tmp/tmp.73UwbRZDmR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6rEpbuIlgk secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.73UwbRZDmR + rm /tmp/tmp.6rEpbuIlgk /tmp/tmp.73UwbRZDmR + return 0 + desc 'Testing on sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on sharded cluster ----------------------------------------------------------------------------------- + log 'Creating PSMDB cluster' + set +o xtrace [2025-08-07T08:26:46+0000] Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.oXqikT4Cds ++ mktemp + local LAST_ERR=/tmp/tmp.vBkYy1kK5f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oXqikT4Cds secret/some-users created + cat /tmp/tmp.vBkYy1kK5f + rm /tmp/tmp.oXqikT4Cds /tmp/tmp.vBkYy1kK5f + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/some-name-sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/some-name-sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/some-name-sharded.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2009-645e0543"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.KCggcUWSi3 ++ mktemp + local LAST_ERR=/tmp/tmp.QZkNrkgNXL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.KCggcUWSi3 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.QZkNrkgNXL + rm /tmp/tmp.KCggcUWSi3 /tmp/tmp.QZkNrkgNXL + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.zRVLo9niV1 ++ mktemp + local LAST_ERR=/tmp/tmp.TCvMJBEKwg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zRVLo9niV1 deployment.apps/psmdb-client created + cat /tmp/tmp.TCvMJBEKwg + rm /tmp/tmp.zRVLo9niV1 /tmp/tmp.TCvMJBEKwg + return 0 + log 'check if all pods started' + set +o xtrace [2025-08-07T08:26:52+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LtUKP4A4Y0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kIbQLMvfc4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LtUKP4A4Y0 ++ cat /tmp/tmp.kIbQLMvfc4 ++ rm /tmp/tmp.LtUKP4A4Y0 /tmp/tmp.kIbQLMvfc4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VsmpmxywKV +++ mktemp ++ local LAST_ERR=/tmp/tmp.LdKmDhFtet ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VsmpmxywKV ++ cat /tmp/tmp.LdKmDhFtet ++ rm /tmp/tmp.VsmpmxywKV /tmp/tmp.LdKmDhFtet ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mEhdjXWBaz +++ mktemp ++ local LAST_ERR=/tmp/tmp.lniVlxKWvi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mEhdjXWBaz ++ cat /tmp/tmp.lniVlxKWvi ++ rm /tmp/tmp.mEhdjXWBaz /tmp/tmp.lniVlxKWvi ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......................... 
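The wait_for_running and wait_cluster_consistency helpers only surface as dots in the log while they poll. Stripped of the tracing, what they wait for is each pod becoming Ready and then the PerconaServerMongoDB resource reporting state "ready" (the same jsonpath is polled further down in this log). A simplified equivalent, assuming the namespace and cluster names used in this run:

ns=demand-backup-incremental-sharded-20877

# wait for the three rs0 pods (the test's wait_pod prints a dot per poll instead)
for i in 0 1 2; do
    kubectl -n "$ns" wait --for=condition=Ready pod/some-name-rs0-$i --timeout=600s
done

# poll the custom resource until the operator reports the whole cluster as ready
until [ "$(kubectl -n "$ns" get psmdb some-name -o jsonpath='{.status.state}')" = "ready" ]; do
    sleep 7
done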
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.umBc1hHbZ9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jlBVx8JLVk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.umBc1hHbZ9 ++ cat /tmp/tmp.jlBVx8JLVk ++ rm /tmp/tmp.umBc1hHbZ9 /tmp/tmp.jlBVx8JLVk ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tk33Z2bASs +++ mktemp ++ local LAST_ERR=/tmp/tmp.vKwWBIJxpM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tk33Z2bASs ++ cat /tmp/tmp.vKwWBIJxpM ++ rm /tmp/tmp.Tk33Z2bASs /tmp/tmp.vKwWBIJxpM ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U1qYlnzNjd +++ mktemp ++ local LAST_ERR=/tmp/tmp.K0f0Nc976r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U1qYlnzNjd ++ cat /tmp/tmp.K0f0Nc976r ++ rm /tmp/tmp.U1qYlnzNjd /tmp/tmp.K0f0Nc976r ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jjFN8ZhlQy +++ mktemp ++ local LAST_ERR=/tmp/tmp.d86Ngq4jUA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jjFN8ZhlQy ++ cat /tmp/tmp.d86Ngq4jUA ++ rm /tmp/tmp.jjFN8ZhlQy /tmp/tmp.d86Ngq4jUA ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OHuZIzyJkr +++ mktemp ++ local LAST_ERR=/tmp/tmp.miU4ogaPEB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OHuZIzyJkr ++ cat /tmp/tmp.miU4ogaPEB ++ rm /tmp/tmp.OHuZIzyJkr /tmp/tmp.miU4ogaPEB ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6Glu1wGJXM +++ mktemp ++ local LAST_ERR=/tmp/tmp.62h8G4Sv22 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6Glu1wGJXM ++ cat /tmp/tmp.62h8G4Sv22 ++ rm /tmp/tmp.6Glu1wGJXM /tmp/tmp.62h8G4Sv22 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g4bqC1aw8e +++ mktemp ++ local LAST_ERR=/tmp/tmp.upksqh7Uak ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g4bqC1aw8e ++ cat /tmp/tmp.upksqh7Uak ++ rm /tmp/tmp.g4bqC1aw8e /tmp/tmp.upksqh7Uak ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo ++ kubectl_bin get svc some-name-mongos '-o=jsonpath={.status}' ++ jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vipLhrCu0y +++ mktemp ++ local LAST_ERR=/tmp/tmp.iCHdrqlIin ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get svc some-name-mongos '-o=jsonpath={.status}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vipLhrCu0y ++ cat /tmp/tmp.iCHdrqlIin ++ rm /tmp/tmp.vipLhrCu0y /tmp/tmp.iCHdrqlIin ++ return 0 + lbEndpoint=34.30.88.110 + '[' -z 34.30.88.110 ']' + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' 
]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1FXuEcaGAb +++ mktemp ++ local LAST_ERR=/tmp/tmp.OblZkmVz6P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1FXuEcaGAb ++ cat /tmp/tmp.OblZkmVz6P ++ rm /tmp/tmp.1FXuEcaGAb /tmp/tmp.OblZkmVz6P ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.n1yw5vO8B0 ++ mktemp + local LAST_ERR=/tmp/tmp.p1uQ6js503 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n1yw5vO8B0 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("9b88f96b-434e-4ba7-a1da-5a05643ee822") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.p1uQ6js503 + rm /tmp/tmp.n1yw5vO8B0 /tmp/tmp.p1uQ6js503 + return 0 + sleep 1 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WsbBAS4Vyx +++ mktemp ++ local LAST_ERR=/tmp/tmp.gKsi6DufJs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WsbBAS4Vyx ++ cat /tmp/tmp.gKsi6DufJs ++ rm /tmp/tmp.WsbBAS4Vyx /tmp/tmp.gKsi6DufJs ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.NOFRr4PaYp ++ mktemp + local LAST_ERR=/tmp/tmp.hW9XrIH89C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i 
in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NOFRr4PaYp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("0f6fd8c0-3ed4-43de-b9d4-6ae7c41d2490") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.hW9XrIH89C + rm /tmp/tmp.NOFRr4PaYp /tmp/tmp.hW9XrIH89C + return 0 + sleep 5 + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hKBj9kiXc4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.V4Db4xibvH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hKBj9kiXc4 ++ cat /tmp/tmp.V4Db4xibvH ++ rm /tmp/tmp.hKBj9kiXc4 /tmp/tmp.V4Db4xibvH ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.nDi5q9Q1sQ ++ mktemp + local LAST_ERR=/tmp/tmp.0b3UGPWv8Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nDi5q9Q1sQ + cat /tmp/tmp.0b3UGPWv8Z + rm /tmp/tmp.nDi5q9Q1sQ /tmp/tmp.0b3UGPWv8Z + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json 
/tmp/tmp.nCNFgSHZFv/find-sharded + log 'waiting 60 seconds for stable timestamp in wiredtiger' + set +o xtrace [2025-08-07T08:30:17+0000] waiting 60 seconds for stable timestamp in wiredtiger + sleep 80 + log 'running backups' + set +o xtrace [2025-08-07T08:31:37+0000] running backups + '[' -z '' ']' + backup_name_aws=backup-aws-s3-sharded + backup_name_gcp=backup-gcp-cs-sharded + backup_name_azure=backup-azure-blob-sharded + run_backup aws-s3 backup-aws-s3-sharded + local storage=aws-s3 + local backup_name=backup-aws-s3-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-aws-s3-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: aws-s3/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.SrB49mIJpp ++ mktemp + local LAST_ERR=/tmp/tmp.hw1w9bqOrK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SrB49mIJpp perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded created + cat /tmp/tmp.hw1w9bqOrK + rm /tmp/tmp.SrB49mIJpp /tmp/tmp.hw1w9bqOrK + return 0 + run_backup gcp-cs backup-gcp-cs-sharded + local storage=gcp-cs + local backup_name=backup-gcp-cs-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-gcp-cs-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: gcp-cs/' + kubectl_bin apply -f - ++ mktemp + yq '.spec.type="incremental-base"' + local LAST_OUT=/tmp/tmp.hCyVwKVmY4 ++ mktemp + local LAST_ERR=/tmp/tmp.ItnbezKzvb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hCyVwKVmY4 perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs-sharded created + cat /tmp/tmp.ItnbezKzvb + rm /tmp/tmp.hCyVwKVmY4 /tmp/tmp.ItnbezKzvb + return 0 + run_backup azure-blob backup-azure-blob-sharded + local storage=azure-blob + local backup_name=backup-azure-blob-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-azure-blob-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: azure-blob/' + kubectl_bin apply -f - + yq '.spec.type="incremental-base"' ++ mktemp + local LAST_OUT=/tmp/tmp.Oz072jTFuq ++ mktemp + local LAST_ERR=/tmp/tmp.21rrqpvIcK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Oz072jTFuq perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded created + cat /tmp/tmp.21rrqpvIcK + rm /tmp/tmp.Oz072jTFuq /tmp/tmp.21rrqpvIcK + return 0 + wait_backup backup-aws-s3-sharded + local backup_name=backup-aws-s3-sharded + local target_state=ready + set +o xtrace waiting for backup-aws-s3-sharded to reach ready state........... 
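Each run_backup call above renders this test's conf/backup.yml through sed and yq: the backup name and storageName are substituted, and .spec.type is forced to incremental-base because these are the base backups of the incremental chain. The template itself is not printed in the log; judging only by the fields the trace sets and by the resource names reported by kubectl, the applied object is roughly of this shape (apiVersion and clusterName are assumptions here, not taken from the log):

cat <<'EOF' | kubectl apply -f -
apiVersion: psmdb.percona.com/v1       # assumed API version
kind: PerconaServerMongoDBBackup
metadata:
  name: backup-aws-s3-sharded          # inserted by run_backup via sed
spec:
  clusterName: some-name               # assumed; the cluster reference sits in the unprinted template
  storageName: aws-s3                  # inserted by run_backup via sed
  type: incremental-base               # forced by run_backup via yq for the base backup
EOF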
+ check_backup_in_storage backup-aws-s3-sharded s3 rs0 + local backup=backup-aws-s3-sharded + local storage_type=s3 + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=s3.amazonaws.com ++ get_backup_dest backup-aws-s3-sharded ++ local backup_name=backup-aws-s3-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.z4iUEJ9rE9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jN4qHDtw5V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z4iUEJ9rE9 ++ cat /tmp/tmp.jN4qHDtw5V ++ rm /tmp/tmp.z4iUEJ9rE9 /tmp/tmp.jN4qHDtw5V ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z + [[ s3 == \m\i\n\i\o ]] + local url=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/rs0/filelist.pbm + log 'checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-07T08:32:09+0000] checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/rs0/filelist.pbm exists + curl --fail --head https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 10127 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK x-amz-id-2: 8+0OKeP7LRgoM45zDi/a03uw5gyssUFM3uojiKPkEER7mwlMQUvQsOAPjVp98uxgCLymIQOSnHE= x-amz-request-id: R0H602S449FK1XG6 Date: Thu, 07 Aug 2025 08:32:10 GMT Last-Modified: Thu, 07 Aug 2025 08:31:57 GMT x-amz-expiration: expiry-date="Sat, 09 Aug 2025 00:00:00 GMT", rule-id="1 Days Cleanup" ETag: "2abd5e109d9d92c72a9ed405a7b657bf" x-amz-server-side-encryption: AES256 Accept-Ranges: bytes Content-Type: application/octet-stream Content-Length: 10127 Server: AmazonS3 + check_backup_in_storage backup-aws-s3-sharded s3 cfg + local backup=backup-aws-s3-sharded + local storage_type=s3 + local replset=cfg + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=s3.amazonaws.com ++ get_backup_dest backup-aws-s3-sharded ++ local backup_name=backup-aws-s3-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.GZXBTaNz96 +++ mktemp ++ local LAST_ERR=/tmp/tmp.O4ZMmsR3tp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GZXBTaNz96 ++ cat /tmp/tmp.O4ZMmsR3tp ++ rm /tmp/tmp.GZXBTaNz96 /tmp/tmp.O4ZMmsR3tp ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z + [[ s3 == \m\i\n\i\o ]] + local 
url=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/cfg/filelist.pbm + log 'checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/cfg/filelist.pbm exists' + set +o xtrace [2025-08-07T08:32:10+0000] checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/cfg/filelist.pbm exists + curl --fail --head https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z/cfg/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 18179 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 18179 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK x-amz-id-2: 8dfSBemEpXyjuXE8aTBNj07AKSDaa4CPB2jN9pfQ6I/TxaRO7fQpoVG0kIvcHm0fpngK7qFYJMTvh5JMIawKfvgzSY5mMR0X x-amz-request-id: 0G7RN8RXBXYT24D0 Date: Thu, 07 Aug 2025 08:32:11 GMT Last-Modified: Thu, 07 Aug 2025 08:32:03 GMT x-amz-expiration: expiry-date="Sat, 09 Aug 2025 00:00:00 GMT", rule-id="1 Days Cleanup" ETag: "cae0b32c681a84155af2ee5efbdc7713" x-amz-server-side-encryption: AES256 Accept-Ranges: bytes Content-Type: application/octet-stream Content-Length: 18179 Server: AmazonS3 + wait_backup backup-gcp-cs-sharded + local backup_name=backup-gcp-cs-sharded + local target_state=ready + set +o xtrace waiting for backup-gcp-cs-sharded to reach ready state......... + check_backup_in_storage backup-gcp-cs-sharded gcs rs0 + local backup=backup-gcp-cs-sharded + local storage_type=gcs + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=storage.googleapis.com ++ get_backup_dest backup-gcp-cs-sharded ++ local backup_name=backup-gcp-cs-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4mgLxIRL7m +++ mktemp ++ local LAST_ERR=/tmp/tmp.do2gc8tVoY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4mgLxIRL7m ++ cat /tmp/tmp.do2gc8tVoY ++ rm /tmp/tmp.4mgLxIRL7m /tmp/tmp.do2gc8tVoY ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z + [[ gcs == \m\i\n\i\o ]] + local url=https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/rs0/filelist.pbm + log 'checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-07T08:32:31+0000] checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/rs0/filelist.pbm exists + curl --fail --head https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 10067 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/2 200 content-type: application/octet-stream x-guploader-uploadid: 
ABgVH8-a8aVb5KnQtdRouGvW5haBL4EC_TkHpUvkKfCJZjz6yqlcQT0aPDRGHubPny5xHYeb expires: Thu, 07 Aug 2025 09:32:31 GMT date: Thu, 07 Aug 2025 08:32:31 GMT cache-control: public, max-age=3600 last-modified: Thu, 07 Aug 2025 08:32:21 GMT etag: "516ddc1bbba20c619f5681aa92b48a2f" x-goog-generation: 1754555541484493 x-goog-metageneration: 1 x-goog-stored-content-encoding: identity x-goog-stored-content-length: 10067 x-goog-hash: crc32c=RTUXmA== x-goog-hash: md5=UW3cG7uiDGGfVoGqkrSKLw== x-goog-expiration: Fri, 08 Aug 2025 08:32:21 GMT x-goog-storage-class: REGIONAL accept-ranges: bytes content-length: 10067 server: UploadServer alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + check_backup_in_storage backup-gcp-cs-sharded gcs cfg + local backup=backup-gcp-cs-sharded + local storage_type=gcs + local replset=cfg + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=storage.googleapis.com ++ get_backup_dest backup-gcp-cs-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ local backup_name=backup-gcp-cs-sharded ++ kubectl_bin get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.oY5D9dBs1n +++ mktemp ++ local LAST_ERR=/tmp/tmp.KEDgGV7yzu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oY5D9dBs1n ++ cat /tmp/tmp.KEDgGV7yzu ++ rm /tmp/tmp.oY5D9dBs1n /tmp/tmp.KEDgGV7yzu ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z + [[ gcs == \m\i\n\i\o ]] + local url=https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/cfg/filelist.pbm + log 'checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/cfg/filelist.pbm exists' + set +o xtrace [2025-08-07T08:32:32+0000] checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/cfg/filelist.pbm exists + curl --fail --head https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z/cfg/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 18179 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/2 200 content-type: application/octet-stream x-guploader-uploadid: ABgVH88yTuTAggvq88tU4Bz3J44hta2RCjdbfZZF7vAv5yxCNsWosbNrJX_E92pihAx-2BG9pTxyoIU expires: Thu, 07 Aug 2025 09:32:32 GMT date: Thu, 07 Aug 2025 08:32:32 GMT cache-control: public, max-age=3600 last-modified: Thu, 07 Aug 2025 08:32:25 GMT etag: "12671c25ccea682e5de510cc3d555bed" x-goog-generation: 1754555545869894 x-goog-metageneration: 1 x-goog-stored-content-encoding: identity x-goog-stored-content-length: 18179 x-goog-hash: crc32c=s0shDg== x-goog-hash: md5=EmccJczqaC5d5RDMPVVb7Q== x-goog-expiration: Fri, 08 Aug 2025 08:32:25 GMT x-goog-storage-class: REGIONAL accept-ranges: bytes content-length: 18179 server: UploadServer alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + wait_backup backup-azure-blob-sharded + local backup_name=backup-azure-blob-sharded + local target_state=ready + set +o xtrace waiting for backup-azure-blob-sharded to reach ready 
state......... + check_backup_in_storage backup-azure-blob-sharded azure rs0 + local backup=backup-azure-blob-sharded + local storage_type=azure + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net ++ get_backup_dest backup-azure-blob-sharded ++ local backup_name=backup-azure-blob-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|s3://||' ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.30kiuBgI7D +++ mktemp ++ local LAST_ERR=/tmp/tmp.8wAj7XnoJO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.30kiuBgI7D ++ cat /tmp/tmp.8wAj7XnoJO ++ rm /tmp/tmp.30kiuBgI7D /tmp/tmp.8wAj7XnoJO ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z + [[ azure == \m\i\n\i\o ]] + local url=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/rs0/filelist.pbm + log 'checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-07T08:32:53+0000] checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/rs0/filelist.pbm exists + curl --fail --head https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 12972 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK Content-Length: 12972 Content-Type: application/octet-stream Content-MD5: 5pnLiFptoEodHMq3OTi/Dg== Last-Modified: Thu, 07 Aug 2025 08:32:45 GMT ETag: 0x8DDD58CFBB11C2A Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 x-ms-request-id: 4377c5cf-001e-0036-4775-07521f000000 x-ms-version: 2009-09-19 x-ms-lease-status: unlocked x-ms-blob-type: BlockBlob Date: Thu, 07 Aug 2025 08:32:53 GMT + check_backup_in_storage backup-azure-blob-sharded azure cfg + local backup=backup-azure-blob-sharded + local storage_type=azure + local replset=cfg + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net ++ get_backup_dest backup-azure-blob-sharded ++ local backup_name=backup-azure-blob-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|s3://||' ++ local LAST_OUT=/tmp/tmp.Wrjr3R1pDo +++ mktemp ++ sed 's|azure://||' ++ local LAST_ERR=/tmp/tmp.WRqHK2okdY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wrjr3R1pDo ++ cat /tmp/tmp.WRqHK2okdY ++ rm /tmp/tmp.Wrjr3R1pDo /tmp/tmp.WRqHK2okdY ++ return 0 + 
backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z + [[ azure == \m\i\n\i\o ]] + local url=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/cfg/filelist.pbm + log 'checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/cfg/filelist.pbm exists' + set +o xtrace [2025-08-07T08:32:54+0000] checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/cfg/filelist.pbm exists + curl --fail --head https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z/cfg/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 18979 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK Content-Length: 18979 Content-Type: application/octet-stream Content-MD5: OAqR0phV89kT1dtVo3lMvg== Last-Modified: Thu, 07 Aug 2025 08:32:48 GMT ETag: 0x8DDD58CFD6A9A3E Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 x-ms-request-id: eaa698ae-301e-0002-4275-0761d7000000 x-ms-version: 2009-09-19 x-ms-lease-status: unlocked x-ms-blob-type: BlockBlob Date: Thu, 07 Aug 2025 08:32:54 GMT + backup_name_minio=backup-minio-sharded + run_backup minio backup-minio-sharded + local storage=minio + local backup_name=backup-minio-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.xRCPIBbeOR ++ mktemp + local LAST_ERR=/tmp/tmp.eWioTCvx5b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xRCPIBbeOR perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded created + cat /tmp/tmp.eWioTCvx5b + rm /tmp/tmp.xRCPIBbeOR /tmp/tmp.eWioTCvx5b + return 0 + wait_backup backup-minio-sharded + local backup_name=backup-minio-sharded + local target_state=ready + set +o xtrace waiting for backup-minio-sharded to reach ready state.......... 
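check_backup_in_storage verifies that PBM really uploaded the per-replset filelist.pbm. A sketch of the logic as it appears in this log: for the public cloud storages it derives an HTTPS URL from the backup's .status.destination and issues curl --fail --head, while for MinIO (reachable only inside the cluster) it lists the key from a short-lived aws-cli pod. The endpoint names and the dummy MinIO credentials (some-access-key / some-secret-key) are copied from the trace; the rest is an assumption, not the suite's actual implementation.

# Hypothetical reconstruction of check_backup_in_storage, based on the commands in this log.
check_backup_in_storage() {
    local backup=$1        # psmdb-backup object name
    local storage_type=$2  # s3 | gcs | azure | minio
    local replset=$3       # rs0 or cfg
    local file=${4:-filelist.pbm}

    # .status.destination looks like s3://operator-testing/<prefix>/<timestamp>;
    # strip the scheme or account prefix to get "bucket/prefix/timestamp".
    local dest
    dest=$(kubectl get psmdb-backup "${backup}" -o jsonpath='{.status.destination}' \
        | sed -e 's|s3://||' -e 's|azure://||' -e 's|https://engk8soperators.blob.core.windows.net/||')

    if [[ ${storage_type} == "minio" ]]; then
        # MinIO is only reachable in-cluster: list the object from a temporary aws-cli pod.
        kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
            /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
            AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 \
            s3 ls "s3://${dest}/${replset}/${file}" | grep "${file}"
        return
    fi

    local endpoint
    case ${storage_type} in
        s3)    endpoint=s3.amazonaws.com ;;
        gcs)   endpoint=storage.googleapis.com ;;
        azure) endpoint=engk8soperators.blob.core.windows.net ;;
    esac

    # Anonymous HEAD request; --fail turns a missing object (403/404) into a non-zero exit.
    curl --fail --head "https://${endpoint}/${dest}/${replset}/${file}"
}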
+ check_backup_in_storage backup-minio-sharded minio rs0 + local backup=backup-minio-sharded + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio-sharded ++ local backup_name=backup-minio-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|s3://||' ++ local LAST_OUT=/tmp/tmp.qB6zUwCrN8 +++ mktemp ++ sed 's|azure://||' ++ local LAST_ERR=/tmp/tmp.uEgnAMKmKl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qB6zUwCrN8 ++ cat /tmp/tmp.uEgnAMKmKl ++ rm /tmp/tmp.qB6zUwCrN8 /tmp/tmp.uEgnAMKmKl ++ return 0 + backup_dest=operator-testing/2025-08-07T08:32:58Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T08:32:58Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.5pp3bJsvX3 ++ mktemp + local LAST_ERR=/tmp/tmp.v1YMO5dEf5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T08:32:58Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5pp3bJsvX3 + cat /tmp/tmp.v1YMO5dEf5 + rm /tmp/tmp.5pp3bJsvX3 /tmp/tmp.v1YMO5dEf5 + return 0 2025-08-07 08:33:12 13772 filelist.pbm + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bXBDnfRB18 +++ mktemp ++ local LAST_ERR=/tmp/tmp.A8AswkYSmd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bXBDnfRB18 ++ cat /tmp/tmp.A8AswkYSmd ++ rm /tmp/tmp.bXBDnfRB18 /tmp/tmp.A8AswkYSmd ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.kbI2TUYlR5 ++ mktemp + local LAST_ERR=/tmp/tmp.g1CV1bEILx + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kbI2TUYlR5 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("28227742-8e62-49c5-894b-b8bc83bfef12") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.g1CV1bEILx + rm /tmp/tmp.kbI2TUYlR5 /tmp/tmp.g1CV1bEILx + return 0 + sleep 5 + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 -not-base-sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local postfix=-not-base-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pEamKuqSnv +++ mktemp ++ local LAST_ERR=/tmp/tmp.GqYPmuIO58 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pEamKuqSnv ++ cat /tmp/tmp.GqYPmuIO58 ++ rm /tmp/tmp.pEamKuqSnv /tmp/tmp.GqYPmuIO58 ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.kx3tx7SaCB ++ mktemp + local LAST_ERR=/tmp/tmp.U1mAbMc9CI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kx3tx7SaCB + cat /tmp/tmp.U1mAbMc9CI + rm /tmp/tmp.kx3tx7SaCB /tmp/tmp.U1mAbMc9CI + return 0 + [[ 0 -eq 0 ]] + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-not-base-sharded.json /tmp/tmp.nCNFgSHZFv/find-not-base-sharded + backup_name_minio_not_base=backup-minio-not-base-sharded + run_backup minio backup-minio-not-base-sharded false + local storage=minio + local backup_name=backup-minio-not-base-sharded + local base=false + local backup_type=incremental + [[ false == \t\r\u\e ]] + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-not-base-sharded/' + yq '.spec.type="incremental"' + kubectl_bin apply -f - + /usr/sbin/sed -e 's/storageName:/storageName: minio/' ++ mktemp + local LAST_OUT=/tmp/tmp.QrfII3jRSs ++ mktemp + local LAST_ERR=/tmp/tmp.SpqT6zfOMP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QrfII3jRSs perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base-sharded created + cat /tmp/tmp.SpqT6zfOMP + rm /tmp/tmp.QrfII3jRSs /tmp/tmp.SpqT6zfOMP + return 0 + wait_backup backup-minio-not-base-sharded + local backup_name=backup-minio-not-base-sharded + local target_state=ready + set +o xtrace waiting for backup-minio-not-base-sharded to reach ready state........ + check_backup_in_storage backup-minio-not-base-sharded minio rs0 + local backup=backup-minio-not-base-sharded + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio-not-base-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ local backup_name=backup-minio-not-base-sharded ++ kubectl_bin get psmdb-backup backup-minio-not-base-sharded -o 'jsonpath={.status.destination}' +++ mktemp ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ local LAST_OUT=/tmp/tmp.4MWYRYXsLh +++ mktemp ++ sed 's|azure://||' ++ local LAST_ERR=/tmp/tmp.q36mas42aV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-not-base-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4MWYRYXsLh ++ cat /tmp/tmp.q36mas42aV ++ rm /tmp/tmp.4MWYRYXsLh /tmp/tmp.q36mas42aV ++ return 0 + backup_dest=operator-testing/2025-08-07T08:33:39Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T08:33:39Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.WVa4XpertT ++ mktemp + local LAST_ERR=/tmp/tmp.IvVKzHFv7E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T08:33:39Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WVa4XpertT + cat /tmp/tmp.IvVKzHFv7E + rm /tmp/tmp.WVa4XpertT /tmp/tmp.IvVKzHFv7E + return 0 2025-08-07 08:33:50 12172 filelist.pbm + '[' -z '' ']' + 
run_restore backup-aws-s3-sharded _restore_sharded + local backup_name=backup-aws-s3-sharded + log 'drop collection' + set +o xtrace [2025-08-07T08:34:01+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bqXHnhwRd5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.alH59r3b9f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bqXHnhwRd5 ++ cat /tmp/tmp.alH59r3b9f ++ rm /tmp/tmp.bqXHnhwRd5 /tmp/tmp.alH59r3b9f ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.xb63id0WwU ++ mktemp + local LAST_ERR=/tmp/tmp.wRb76rBP8d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xb63id0WwU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("2e89a155-78fa-4cea-9db2-1339244d9fda") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.wRb76rBP8d + rm /tmp/tmp.xb63id0WwU /tmp/tmp.wRb76rBP8d + return 0 + log 'check backup and restore -- backup-aws-s3-sharded' + set +o xtrace [2025-08-07T08:34:04+0000] check backup and restore -- backup-aws-s3-sharded + /usr/sbin/sed -e 's/backupName:/backupName: backup-aws-s3-sharded/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + kubectl_bin apply -f - + /usr/sbin/sed -e 's/name:/name: restore-backup-aws-s3-sharded/' ++ mktemp + local LAST_OUT=/tmp/tmp.eOkmjjTPuL ++ mktemp + local LAST_ERR=/tmp/tmp.9VbxZpgZrN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eOkmjjTPuL perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3-sharded created + cat /tmp/tmp.9VbxZpgZrN + rm /tmp/tmp.eOkmjjTPuL /tmp/tmp.9VbxZpgZrN + return 0 + run_recovery_check backup-aws-s3-sharded _restore_sharded + local backup_name=backup-aws-s3-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-aws-s3-sharded some-name requested 0 
3000 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3-sharded object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3-sharded to reach state "requested" ....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-20877", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.xP5u16SPZV ++ mktemp + local LAST_ERR=/tmp/tmp.mCVvUJ7eNv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xP5u16SPZV + cat /tmp/tmp.mCVvUJ7eNv + rm /tmp/tmp.xP5u16SPZV /tmp/tmp.mCVvUJ7eNv + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + wait_restore backup-aws-s3-sharded some-name ready 0 3000 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3-sharded object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3-sharded to reach state "ready" ..OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.l5xPmmIvyQ ++ mktemp + local LAST_ERR=/tmp/tmp.4BD8wUndrP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l5xPmmIvyQ apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-20877"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n 
mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-08-07T08:26:50Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-20877 resourceVersion: "1754556053449327008" uid: 81ff0247-c824-4184-acc6-788b25cecf0b spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.21.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 
resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f conditions: - lastTransitionTime: "2025-08-07T08:26:50Z" status: "True" type: sharding - lastTransitionTime: "2025-08-07T08:26:55Z" status: "True" type: initializing - lastTransitionTime: "2025-08-07T08:28:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready host: 34.30.88.110 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.22-12 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.4BD8wUndrP + rm /tmp/tmp.l5xPmmIvyQ /tmp/tmp.4BD8wUndrP + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zgYPaLAFmr +++ mktemp ++ local LAST_ERR=/tmp/tmp.3DqLMUuB50 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zgYPaLAFmr ++ cat /tmp/tmp.3DqLMUuB50 ++ rm /tmp/tmp.zgYPaLAFmr /tmp/tmp.3DqLMUuB50 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OGbkVQsInH +++ mktemp ++ local LAST_ERR=/tmp/tmp.E51SalEryx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OGbkVQsInH ++ cat /tmp/tmp.E51SalEryx ++ rm /tmp/tmp.OGbkVQsInH /tmp/tmp.E51SalEryx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PkyFOHx1SI +++ mktemp ++ local LAST_ERR=/tmp/tmp.zFNjrA85Xk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PkyFOHx1SI ++ cat /tmp/tmp.zFNjrA85Xk ++ rm /tmp/tmp.PkyFOHx1SI /tmp/tmp.zFNjrA85Xk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l5yAK2oUOF +++ mktemp ++ local LAST_ERR=/tmp/tmp.YEQuTd6Dwy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l5yAK2oUOF ++ cat /tmp/tmp.YEQuTd6Dwy ++ rm /tmp/tmp.l5yAK2oUOF /tmp/tmp.YEQuTd6Dwy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mt7rdV49ir +++ mktemp ++ local LAST_ERR=/tmp/tmp.eYdObN3uRz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mt7rdV49ir ++ cat /tmp/tmp.eYdObN3uRz ++ rm /tmp/tmp.mt7rdV49ir /tmp/tmp.eYdObN3uRz ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qrGGoFwEO7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NFpCjKbT51 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qrGGoFwEO7 ++ cat /tmp/tmp.NFpCjKbT51 ++ rm /tmp/tmp.qrGGoFwEO7 /tmp/tmp.NFpCjKbT51 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.inmq8JWRph +++ mktemp ++ local LAST_ERR=/tmp/tmp.GAQHYKfGxh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.inmq8JWRph ++ cat /tmp/tmp.GAQHYKfGxh ++ rm /tmp/tmp.inmq8JWRph /tmp/tmp.GAQHYKfGxh ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OcEwORZSIa +++ mktemp ++ local LAST_ERR=/tmp/tmp.8FwBvmnZSP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OcEwORZSIa ++ cat /tmp/tmp.8FwBvmnZSP ++ rm /tmp/tmp.OcEwORZSIa /tmp/tmp.8FwBvmnZSP ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LBZYKPFHRk +++ mktemp ++ local LAST_ERR=/tmp/tmp.SRXK2CXAof ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LBZYKPFHRk ++ cat /tmp/tmp.SRXK2CXAof ++ rm /tmp/tmp.LBZYKPFHRk /tmp/tmp.SRXK2CXAof ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XBFNCER3kr +++ mktemp ++ local LAST_ERR=/tmp/tmp.8eAu9oYEoH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XBFNCER3kr ++ cat /tmp/tmp.8eAu9oYEoH ++ rm /tmp/tmp.XBFNCER3kr /tmp/tmp.8eAu9oYEoH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ApwDA4nV11 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pP1gsoEMvb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ApwDA4nV11 ++ cat /tmp/tmp.pP1gsoEMvb ++ rm /tmp/tmp.ApwDA4nV11 /tmp/tmp.pP1gsoEMvb ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wja0qqBbfc +++ mktemp ++ local LAST_ERR=/tmp/tmp.0vWXvHPgpx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wja0qqBbfc ++ cat /tmp/tmp.0vWXvHPgpx ++ rm /tmp/tmp.Wja0qqBbfc /tmp/tmp.0vWXvHPgpx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aylaRNr7FF +++ mktemp ++ local LAST_ERR=/tmp/tmp.hHVDtCS1lF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aylaRNr7FF ++ cat /tmp/tmp.hHVDtCS1lF ++ rm /tmp/tmp.aylaRNr7FF /tmp/tmp.hHVDtCS1lF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VLYVNmFd4C +++ mktemp ++ local LAST_ERR=/tmp/tmp.lD35Cx6evi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VLYVNmFd4C ++ cat /tmp/tmp.lD35Cx6evi ++ rm /tmp/tmp.VLYVNmFd4C /tmp/tmp.lD35Cx6evi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UKpErWM3bX +++ mktemp ++ local LAST_ERR=/tmp/tmp.OJo7KHnuwJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UKpErWM3bX ++ cat /tmp/tmp.OJo7KHnuwJ ++ rm /tmp/tmp.UKpErWM3bX /tmp/tmp.OJo7KHnuwJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HpSU038YXl +++ mktemp ++ local LAST_ERR=/tmp/tmp.5nkbi9lNTl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HpSU038YXl ++ cat /tmp/tmp.5nkbi9lNTl ++ rm /tmp/tmp.HpSU038YXl /tmp/tmp.5nkbi9lNTl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.owW2ViogoY +++ mktemp ++ local LAST_ERR=/tmp/tmp.hnQSa4H97B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.owW2ViogoY ++ cat /tmp/tmp.hnQSa4H97B ++ rm /tmp/tmp.owW2ViogoY /tmp/tmp.hnQSa4H97B ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OC0sa3uiT8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LaHsEljsUr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OC0sa3uiT8 ++ cat /tmp/tmp.LaHsEljsUr ++ rm /tmp/tmp.OC0sa3uiT8 /tmp/tmp.LaHsEljsUr ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ 
kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pUo4u4AtSn +++ mktemp ++ local LAST_ERR=/tmp/tmp.dXFz32GzRp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pUo4u4AtSn ++ cat /tmp/tmp.dXFz32GzRp ++ rm /tmp/tmp.pUo4u4AtSn /tmp/tmp.dXFz32GzRp ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5OER877uKB ++ mktemp + local LAST_ERR=/tmp/tmp.BnJ2PdFJEq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5OER877uKB + cat /tmp/tmp.BnJ2PdFJEq + rm /tmp/tmp.5OER877uKB /tmp/tmp.BnJ2PdFJEq + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.nCNFgSHZFv/find-sharded + check_exported_mongos_service_endpoint 34.30.88.110 + local host=34.30.88.110 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E4P79SoCGZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.wkXzR7yavG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E4P79SoCGZ ++ cat /tmp/tmp.wkXzR7yavG ++ rm /tmp/tmp.E4P79SoCGZ /tmp/tmp.wkXzR7yavG ++ return 0 + '[' 34.30.88.110 '!=' 34.30.88.110 ']' + run_restore backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + log 'drop collection' + set +o xtrace [2025-08-07T08:44:09+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NUvAEBqG3T +++ mktemp ++ local LAST_ERR=/tmp/tmp.vui22d5l76 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NUvAEBqG3T ++ cat /tmp/tmp.vui22d5l76 ++ rm /tmp/tmp.NUvAEBqG3T /tmp/tmp.vui22d5l76 ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.uEQelG9r0w ++ mktemp + local LAST_ERR=/tmp/tmp.Lk8Ti9P5cO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uEQelG9r0w Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("a6314f86-4dc7-4a18-8dd9-69a31e352fb3") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.Lk8Ti9P5cO + rm /tmp/tmp.uEQelG9r0w /tmp/tmp.Lk8Ti9P5cO + return 0 + log 'check backup and restore -- backup-gcp-cs-sharded' + set +o xtrace [2025-08-07T08:44:12+0000] check backup and restore -- backup-gcp-cs-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/backupName:/backupName: backup-gcp-cs-sharded/' + kubectl_bin apply -f - + /usr/sbin/sed -e 's/name:/name: restore-backup-gcp-cs-sharded/' ++ mktemp + local LAST_OUT=/tmp/tmp.o84y8Yjg6Y ++ mktemp + local LAST_ERR=/tmp/tmp.YFynqYeDiv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o84y8Yjg6Y perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs-sharded created + cat /tmp/tmp.YFynqYeDiv + rm /tmp/tmp.o84y8Yjg6Y /tmp/tmp.YFynqYeDiv + return 0 + run_recovery_check backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-gcp-cs-sharded some-name requested 0 3000 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs-sharded object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs-sharded to reach state "requested" ....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-20877", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.A3DdRNlAGV ++ mktemp + local LAST_ERR=/tmp/tmp.CU3mnvzJQI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A3DdRNlAGV + cat /tmp/tmp.CU3mnvzJQI + rm /tmp/tmp.A3DdRNlAGV /tmp/tmp.CU3mnvzJQI + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + wait_restore backup-gcp-cs-sharded some-name ready 0 3000 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs-sharded object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs-sharded to reach state "ready" ..OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.1AeqFYVaXs ++ mktemp + local LAST_ERR=/tmp/tmp.eSQrxNJ9xg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1AeqFYVaXs apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-20877"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n 
mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-08-07T08:26:50Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-20877 resourceVersion: "1754556659793439008" uid: 81ff0247-c824-4184-acc6-788b25cecf0b spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.21.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 
resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f conditions: - lastTransitionTime: "2025-08-07T08:26:50Z" status: "True" type: sharding - lastTransitionTime: "2025-08-07T08:26:55Z" status: "True" type: initializing - lastTransitionTime: "2025-08-07T08:28:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-08-07T08:41:34Z" message: |- handle ReplicaSetNoPrimary: get standalone mongo client: ping mongo: server selection error: server selection timeout, current topology: { Type: Single, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp: lookup some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local on 34.118.224.10:53: no such host }, ] } handle ReplicaSetNoPrimary: get standalone mongo client: ping mongo: server selection error: context deadline exceeded, current topology: { Type: Single, Servers: [{ Addr: some-name-rs0-0.some-name-rs0.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp: lookup some-name-rs0-0.some-name-rs0.demand-backup-incremental-sharded-20877.svc.cluster.local on 34.118.224.10:53: no such host }, ] } reason: ErrorReconcile status: "True" type: error host: 34.30.88.110 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.22-12 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.eSQrxNJ9xg + rm /tmp/tmp.1AeqFYVaXs /tmp/tmp.eSQrxNJ9xg + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4ItRZc2iqj +++ mktemp ++ local LAST_ERR=/tmp/tmp.R4k7mHJ6hX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4ItRZc2iqj ++ cat /tmp/tmp.R4k7mHJ6hX ++ rm /tmp/tmp.4ItRZc2iqj /tmp/tmp.R4k7mHJ6hX ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' 
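# --- editor's note (annotation, not part of the captured trace) ---------------
# The step just above reads the percona.com/resync-pbm annotation from the psmdb
# object; the trace shows it set to "true" (not null), which appears to mean a PBM
# resync was requested after the restore, so the script falls through to
# wait_cluster_consistency. The loop that follows polls .status.state roughly every
# 10 seconds for up to 60 attempts. A minimal standalone sketch of that wait,
# assuming the same cluster name and kubectl context (the 60/10 values are taken
# from the trace; the error message is hypothetical):
#
#   retry=0
#   until [ "$(kubectl get psmdb some-name -o 'jsonpath={.status.state}')" = "ready" ]; do
#     retry=$((retry + 1))
#     [ "$retry" -ge 60 ] && { echo "cluster never became ready" >&2; exit 1; }
#     sleep 10
#   done
# ------------------------------------------------------------------------------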
waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vVD8Xslhh7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3pSpdKSNfj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vVD8Xslhh7 ++ cat /tmp/tmp.3pSpdKSNfj ++ rm /tmp/tmp.vVD8Xslhh7 /tmp/tmp.3pSpdKSNfj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fKwHa0bgnI +++ mktemp ++ local LAST_ERR=/tmp/tmp.8yn42GEytr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fKwHa0bgnI ++ cat /tmp/tmp.8yn42GEytr ++ rm /tmp/tmp.fKwHa0bgnI /tmp/tmp.8yn42GEytr ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5YBULaLg7P +++ mktemp ++ local LAST_ERR=/tmp/tmp.13IiEk0RL1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5YBULaLg7P ++ cat /tmp/tmp.13IiEk0RL1 ++ rm /tmp/tmp.5YBULaLg7P /tmp/tmp.13IiEk0RL1 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X7tPtSRVC7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fFWiS6cDKC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X7tPtSRVC7 ++ cat /tmp/tmp.fFWiS6cDKC ++ rm /tmp/tmp.X7tPtSRVC7 /tmp/tmp.fFWiS6cDKC ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ntkQo4vyrE +++ mktemp ++ local LAST_ERR=/tmp/tmp.0pfop0At8V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ntkQo4vyrE ++ cat /tmp/tmp.0pfop0At8V ++ rm /tmp/tmp.ntkQo4vyrE /tmp/tmp.0pfop0At8V ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZbSW9ctKOQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.OMtEdCTImX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZbSW9ctKOQ ++ cat /tmp/tmp.OMtEdCTImX ++ rm /tmp/tmp.ZbSW9ctKOQ /tmp/tmp.OMtEdCTImX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gKwEDmH7Kr +++ mktemp ++ local LAST_ERR=/tmp/tmp.3uki8AMIX7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gKwEDmH7Kr ++ cat /tmp/tmp.3uki8AMIX7 ++ rm /tmp/tmp.gKwEDmH7Kr /tmp/tmp.3uki8AMIX7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oCCoyv78yw +++ mktemp ++ local LAST_ERR=/tmp/tmp.fQjnNrMqpc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oCCoyv78yw ++ cat /tmp/tmp.fQjnNrMqpc ++ rm /tmp/tmp.oCCoyv78yw /tmp/tmp.fQjnNrMqpc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qo0TFSek8b +++ mktemp ++ local LAST_ERR=/tmp/tmp.LLe3IU2Nru ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qo0TFSek8b ++ cat /tmp/tmp.LLe3IU2Nru ++ rm /tmp/tmp.qo0TFSek8b /tmp/tmp.LLe3IU2Nru ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5v1hDkImNL +++ mktemp ++ local LAST_ERR=/tmp/tmp.7e9K3XMcwZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5v1hDkImNL ++ cat /tmp/tmp.7e9K3XMcwZ ++ rm /tmp/tmp.5v1hDkImNL /tmp/tmp.7e9K3XMcwZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.00F23y6mEQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.JJYQXuZqDC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.00F23y6mEQ ++ cat /tmp/tmp.JJYQXuZqDC ++ rm /tmp/tmp.00F23y6mEQ /tmp/tmp.JJYQXuZqDC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I1gr9IsbUZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.4a89ZH0sbN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I1gr9IsbUZ ++ cat /tmp/tmp.4a89ZH0sbN ++ rm /tmp/tmp.I1gr9IsbUZ /tmp/tmp.4a89ZH0sbN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . 
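# --- editor's note (annotation, not part of the captured trace) ---------------
# A reading of the state sequence above, not something stated in the log: the first
# iterations report "error" before settling back to "initializing". The status
# conditions dumped earlier record ErrorReconcile with "no such host" / connection
# failures for the cfg and rs0 pod hostnames, which is consistent with the replica
# set members being restarted while the incremental (physical) restore is applied;
# once they come back and re-register, the state progresses to "ready" below.
# ------------------------------------------------------------------------------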
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j4jCq0svVT +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z412mKphiw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j4jCq0svVT ++ cat /tmp/tmp.Z412mKphiw ++ rm /tmp/tmp.j4jCq0svVT /tmp/tmp.Z412mKphiw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jKX3PHV1tS +++ mktemp ++ local LAST_ERR=/tmp/tmp.iGJuTuOVw4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jKX3PHV1tS ++ cat /tmp/tmp.iGJuTuOVw4 ++ rm /tmp/tmp.jKX3PHV1tS /tmp/tmp.iGJuTuOVw4 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HCd454vzH7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SSTwPHwm6I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HCd454vzH7 ++ cat /tmp/tmp.SSTwPHwm6I ++ rm /tmp/tmp.HCd454vzH7 /tmp/tmp.SSTwPHwm6I ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Xn180pzeuF ++ mktemp + local LAST_ERR=/tmp/tmp.qOe43r1QhQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xn180pzeuF + cat /tmp/tmp.qOe43r1QhQ + rm /tmp/tmp.Xn180pzeuF /tmp/tmp.qOe43r1QhQ + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.nCNFgSHZFv/find-sharded + check_exported_mongos_service_endpoint 34.30.88.110 + local host=34.30.88.110 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qgBVuJVlaV +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZhKGUnCi8X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qgBVuJVlaV ++ cat /tmp/tmp.ZhKGUnCi8X ++ rm /tmp/tmp.qgBVuJVlaV /tmp/tmp.ZhKGUnCi8X ++ return 0 + '[' 34.30.88.110 '!=' 34.30.88.110 ']' + run_restore backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + log 'drop collection' + set +o xtrace [2025-08-07T08:53:41+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.weDTFZwzlg +++ mktemp ++ local LAST_ERR=/tmp/tmp.TPphvCCK3f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.weDTFZwzlg ++ cat /tmp/tmp.TPphvCCK3f ++ rm /tmp/tmp.weDTFZwzlg /tmp/tmp.TPphvCCK3f ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.THr1SPpRsp ++ mktemp + local LAST_ERR=/tmp/tmp.WcwkloRuU4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.THr1SPpRsp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("77ea70dd-7aa0-4ba0-9e2b-5cd1ae307378") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.WcwkloRuU4 + rm /tmp/tmp.THr1SPpRsp /tmp/tmp.WcwkloRuU4 + return 0 + log 'check backup and 
restore -- backup-azure-blob-sharded' + set +o xtrace [2025-08-07T08:53:45+0000] check backup and restore -- backup-azure-blob-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-azure-blob-sharded/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-azure-blob-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.GJo1WdRkxu ++ mktemp + local LAST_ERR=/tmp/tmp.F8EqxLWuW0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GJo1WdRkxu perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob-sharded created + cat /tmp/tmp.F8EqxLWuW0 + rm /tmp/tmp.GJo1WdRkxu /tmp/tmp.F8EqxLWuW0 + return 0 + run_recovery_check backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-azure-blob-sharded some-name requested 0 3000 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob-sharded object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob-sharded to reach state "requested" ....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-20877", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.N9GsgdHwxv ++ mktemp + local LAST_ERR=/tmp/tmp.EpoN3348PG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N9GsgdHwxv + cat /tmp/tmp.EpoN3348PG + rm /tmp/tmp.N9GsgdHwxv /tmp/tmp.EpoN3348PG + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + wait_restore backup-azure-blob-sharded some-name ready 0 3000 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob-sharded object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob-sharded to reach state "ready" ..OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.vklGU4fXD5 ++ mktemp + local LAST_ERR=/tmp/tmp.HXtqfGPGHX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vklGU4fXD5 apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-20877"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-08-07T08:26:50Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-20877 resourceVersion: "1754557233928943008" uid: 81ff0247-c824-4184-acc6-788b25cecf0b spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret 
insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.21.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f conditions: - lastTransitionTime: "2025-08-07T08:26:50Z" status: "True" type: sharding - lastTransitionTime: "2025-08-07T08:26:55Z" status: "True" type: initializing - lastTransitionTime: "2025-08-07T08:28:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-08-07T08:41:34Z" message: 'dial: ping mongo: server selection error: context deadline exceeded, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp: lookup some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local on 34.118.224.10:53: no such host }, { Addr: some-name-cfg-1.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.252.25.27:27017: connect: connection refused }, { Addr: 
some-name-cfg-2.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.252.24.29:27017: connect: connection refused }, ] }' reason: ErrorReconcile status: "True" type: error host: 34.30.88.110 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.22-12 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.HXtqfGPGHX + rm /tmp/tmp.vklGU4fXD5 /tmp/tmp.HXtqfGPGHX + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mIWNjTNSV5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tJpK4QtJwL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mIWNjTNSV5 ++ cat /tmp/tmp.tJpK4QtJwL ++ rm /tmp/tmp.mIWNjTNSV5 /tmp/tmp.tJpK4QtJwL ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AgGZU9LsI2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hsV5xfo1CZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AgGZU9LsI2 ++ cat /tmp/tmp.hsV5xfo1CZ ++ rm /tmp/tmp.AgGZU9LsI2 /tmp/tmp.hsV5xfo1CZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vzmfTGQ9cv +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZhTAPnCNr6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vzmfTGQ9cv ++ cat /tmp/tmp.ZhTAPnCNr6 ++ rm /tmp/tmp.vzmfTGQ9cv /tmp/tmp.ZhTAPnCNr6 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UCSnXvzFBl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ae196uIsBk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UCSnXvzFBl ++ cat /tmp/tmp.Ae196uIsBk ++ rm /tmp/tmp.UCSnXvzFBl /tmp/tmp.Ae196uIsBk ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . 
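# --- editor's note (annotation, not part of the captured trace) ---------------
# Each PerconaServerMongoDBRestore object in this trace (restore-backup-gcp-cs-sharded
# and restore-backup-azure-blob-sharded above, restore-backup-minio-not-base-sharded
# further below) is generated the same way: the shared restore.yml template is read
# with yq and its name/backupName placeholders are filled in with sed before being
# applied. A simplified sketch of that step, assuming the repository-relative template
# path (the trace uses the full Jenkins workspace path) and a hypothetical
# $backup_name variable:
#
#   backup_name=backup-azure-blob-sharded
#   yq e2e-tests/demand-backup-incremental-sharded/conf/restore.yml \
#     | sed -e "s/name:/name: restore-${backup_name}/" \
#     | sed -e "s/backupName:/backupName: ${backup_name}/" \
#     | kubectl apply -f -
# ------------------------------------------------------------------------------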
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.elNBF1rh9k +++ mktemp ++ local LAST_ERR=/tmp/tmp.N6OE79XHiC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.elNBF1rh9k ++ cat /tmp/tmp.N6OE79XHiC ++ rm /tmp/tmp.elNBF1rh9k /tmp/tmp.N6OE79XHiC ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vo4SjOcTxR +++ mktemp ++ local LAST_ERR=/tmp/tmp.XNbOUHvF5p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vo4SjOcTxR ++ cat /tmp/tmp.XNbOUHvF5p ++ rm /tmp/tmp.vo4SjOcTxR /tmp/tmp.XNbOUHvF5p ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.798OcBd0Tc +++ mktemp ++ local LAST_ERR=/tmp/tmp.pTTuEybodP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.798OcBd0Tc ++ cat /tmp/tmp.pTTuEybodP ++ rm /tmp/tmp.798OcBd0Tc /tmp/tmp.pTTuEybodP ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YvzxFxzY1b +++ mktemp ++ local LAST_ERR=/tmp/tmp.SNcx91WEyZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YvzxFxzY1b ++ cat /tmp/tmp.SNcx91WEyZ ++ rm /tmp/tmp.YvzxFxzY1b /tmp/tmp.SNcx91WEyZ ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l1PylLBIuR +++ mktemp ++ local LAST_ERR=/tmp/tmp.OPr1lLg0eg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l1PylLBIuR ++ cat /tmp/tmp.OPr1lLg0eg ++ rm /tmp/tmp.l1PylLBIuR /tmp/tmp.OPr1lLg0eg ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RHfYjXXEOi +++ mktemp ++ local LAST_ERR=/tmp/tmp.bvBPdledAO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RHfYjXXEOi ++ cat /tmp/tmp.bvBPdledAO ++ rm /tmp/tmp.RHfYjXXEOi /tmp/tmp.bvBPdledAO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FAh2n6Ehot +++ mktemp ++ local LAST_ERR=/tmp/tmp.j5NEP21cTI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FAh2n6Ehot ++ cat /tmp/tmp.j5NEP21cTI ++ rm /tmp/tmp.FAh2n6Ehot /tmp/tmp.j5NEP21cTI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vQH8GbVIue +++ mktemp ++ local LAST_ERR=/tmp/tmp.nxC6L4rmvK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vQH8GbVIue ++ cat /tmp/tmp.nxC6L4rmvK ++ rm /tmp/tmp.vQH8GbVIue /tmp/tmp.nxC6L4rmvK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HLM0ElWJoU +++ mktemp ++ local LAST_ERR=/tmp/tmp.KPZ8IwxQS6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HLM0ElWJoU ++ cat /tmp/tmp.KPZ8IwxQS6 ++ rm /tmp/tmp.HLM0ElWJoU /tmp/tmp.KPZ8IwxQS6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rN1GU6FFQF +++ mktemp ++ local LAST_ERR=/tmp/tmp.klCYy8K7xz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rN1GU6FFQF ++ cat /tmp/tmp.klCYy8K7xz ++ rm /tmp/tmp.rN1GU6FFQF /tmp/tmp.klCYy8K7xz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XaXNGTmWEK +++ mktemp ++ local LAST_ERR=/tmp/tmp.0PUct1whrV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XaXNGTmWEK ++ cat /tmp/tmp.0PUct1whrV ++ rm /tmp/tmp.XaXNGTmWEK /tmp/tmp.0PUct1whrV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VVLiTNhado +++ mktemp ++ local LAST_ERR=/tmp/tmp.xHUeHtxmt7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VVLiTNhado ++ cat /tmp/tmp.xHUeHtxmt7 ++ rm /tmp/tmp.VVLiTNhado /tmp/tmp.xHUeHtxmt7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . 
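# --- editor's note (annotation, not part of the captured trace) ---------------
# Once the cluster reports "ready" (a couple of iterations below), the test re-reads
# the restored data through mongos and diffs it against the expected fixture. Stripped
# of the retry/temp-file plumbing, the check amounts to the following; the client pod
# name and namespace are copied from this run (the script normally looks them up),
# and the output path /tmp/find-sharded is illustrative:
#
#   kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c \
#     'printf "use myApp\n db.test.find()\n" | mongo "mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin"' \
#     | grep -Ev 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match' \
#     | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
#     > /tmp/find-sharded
#   diff e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/find-sharded
#
# The grep/sed filters drop shell noise and normalize ObjectIds and the
# namespace-specific host suffix so the diff only flags real data differences.
# ------------------------------------------------------------------------------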
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sqyf8y8d1z +++ mktemp ++ local LAST_ERR=/tmp/tmp.RfquzO6hBR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sqyf8y8d1z ++ cat /tmp/tmp.RfquzO6hBR ++ rm /tmp/tmp.sqyf8y8d1z /tmp/tmp.RfquzO6hBR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bNVFplfvJn +++ mktemp ++ local LAST_ERR=/tmp/tmp.MqBTrq4FRx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bNVFplfvJn ++ cat /tmp/tmp.MqBTrq4FRx ++ rm /tmp/tmp.bNVFplfvJn /tmp/tmp.MqBTrq4FRx ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish. + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e0bd429WBc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vs4uNuSaeq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e0bd429WBc ++ cat /tmp/tmp.Vs4uNuSaeq ++ rm /tmp/tmp.e0bd429WBc /tmp/tmp.Vs4uNuSaeq ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.VBlICDQjOZ ++ mktemp + local LAST_ERR=/tmp/tmp.lSmIHP9GmK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VBlICDQjOZ + cat /tmp/tmp.lSmIHP9GmK + rm /tmp/tmp.VBlICDQjOZ /tmp/tmp.lSmIHP9GmK + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.nCNFgSHZFv/find-sharded + check_exported_mongos_service_endpoint 34.30.88.110 + local host=34.30.88.110 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7Weo7lCFtX +++ mktemp ++ local LAST_ERR=/tmp/tmp.a9zk10etGE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7Weo7lCFtX ++ cat /tmp/tmp.a9zk10etGE ++ rm /tmp/tmp.7Weo7lCFtX /tmp/tmp.a9zk10etGE ++ return 0 + '[' 34.30.88.110 '!=' 34.30.88.110 ']' + run_restore backup-minio-not-base-sharded _restore_sharded + local backup_name=backup-minio-not-base-sharded + log 'drop collection' + set +o xtrace [2025-08-07T09:03:57+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q48FYLOMmB +++ mktemp ++ local LAST_ERR=/tmp/tmp.jIsyHBL1ge ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q48FYLOMmB ++ cat /tmp/tmp.jIsyHBL1ge ++ rm /tmp/tmp.q48FYLOMmB /tmp/tmp.jIsyHBL1ge ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.gBl2oIZDQl ++ mktemp + local LAST_ERR=/tmp/tmp.CCPezZALGK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gBl2oIZDQl Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("314745d2-391a-41df-b214-bd4c472d3c40") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.CCPezZALGK + rm /tmp/tmp.gBl2oIZDQl /tmp/tmp.CCPezZALGK + return 0 + log 'check 
backup and restore -- backup-minio-not-base-sharded' + set +o xtrace [2025-08-07T09:03:59+0000] check backup and restore -- backup-minio-not-base-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-not-base-sharded/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-not-base-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2NfmXrud2q ++ mktemp + local LAST_ERR=/tmp/tmp.swQrUDSYx8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2NfmXrud2q perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-not-base-sharded created + cat /tmp/tmp.swQrUDSYx8 + rm /tmp/tmp.2NfmXrud2q /tmp/tmp.swQrUDSYx8 + return 0 + run_recovery_check backup-minio-not-base-sharded _restore_sharded false + local backup_name=backup-minio-not-base-sharded + local compare_suffix=_restore_sharded + local base=false + wait_restore backup-minio-not-base-sharded some-name requested 0 3000 + local backup_name=backup-minio-not-base-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base-sharded object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base-sharded to reach state "requested" ....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-20877", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.IDmpsda3sX ++ mktemp + local LAST_ERR=/tmp/tmp.tEP7K2I1l0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IDmpsda3sX + cat /tmp/tmp.tEP7K2I1l0 + rm /tmp/tmp.IDmpsda3sX /tmp/tmp.tEP7K2I1l0 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + wait_restore backup-minio-not-base-sharded some-name ready 0 3000 + local backup_name=backup-minio-not-base-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base-sharded object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base-sharded to reach state "ready" ..OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.wKDVaMI7Um ++ mktemp + local LAST_ERR=/tmp/tmp.EvimfMHMW1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wKDVaMI7Um apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-20877"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-08-07T08:26:50Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-20877 resourceVersion: "1754557859030079008" uid: 81ff0247-c824-4184-acc6-788b25cecf0b spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret 
insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.21.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f conditions: - lastTransitionTime: "2025-08-07T08:26:50Z" status: "True" type: sharding - lastTransitionTime: "2025-08-07T08:26:55Z" status: "True" type: initializing - lastTransitionTime: "2025-08-07T08:28:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-08-07T08:41:34Z" message: 'dial: ping mongo: server selection error: context deadline exceeded, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.252.24.34:27017: connect: connection refused }, { Addr: some-name-cfg-1.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp: lookup some-name-cfg-1.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local on 34.118.224.10:53: no such host }, { Addr: 
some-name-cfg-2.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.252.25.32:27017: connect: connection refused }, ] }' reason: ErrorReconcile status: "True" type: error host: 34.30.88.110 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.22-12 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.EvimfMHMW1 + rm /tmp/tmp.wKDVaMI7Um /tmp/tmp.EvimfMHMW1 + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UYUyBJMyEj +++ mktemp ++ local LAST_ERR=/tmp/tmp.r09fEOZ4cd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UYUyBJMyEj ++ cat /tmp/tmp.r09fEOZ4cd ++ rm /tmp/tmp.UYUyBJMyEj /tmp/tmp.r09fEOZ4cd ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CMZulnwYld +++ mktemp ++ local LAST_ERR=/tmp/tmp.b0spPlMLcq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CMZulnwYld ++ cat /tmp/tmp.b0spPlMLcq ++ rm /tmp/tmp.CMZulnwYld /tmp/tmp.b0spPlMLcq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iACPyV8707 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mekhlmvSR6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iACPyV8707 ++ cat /tmp/tmp.mekhlmvSR6 ++ rm /tmp/tmp.iACPyV8707 /tmp/tmp.mekhlmvSR6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U0N96n4ncB +++ mktemp ++ local LAST_ERR=/tmp/tmp.9eWH9huTdP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U0N96n4ncB ++ cat /tmp/tmp.9eWH9huTdP ++ rm /tmp/tmp.U0N96n4ncB /tmp/tmp.9eWH9huTdP ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . 
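
The repeated kubectl get psmdb some-name -o jsonpath={.status.state} calls around this point are the wait_cluster_consistency helper polling the custom resource after the restore; the state flips between initializing and error while the cluster components come back, and the loop only stops once it reads "ready" (or gives up after 60 attempts). A minimal sketch of that loop, reconstructed from this trace with the error handling simplified (the real helper routes kubectl through kubectl_bin):

    # Poll the PerconaServerMongoDB resource until it reports "ready".
    wait_cluster_consistency() {
        local cluster_name=$1 wait_time=${2:-32} retry=0
        sleep 7
        echo -n 'waiting for cluster readiness'
        until [[ $(kubectl get psmdb "$cluster_name" -o jsonpath='{.status.state}') == "ready" ]]; do
            let retry+=1
            if [ "$retry" -ge "$wait_time" ]; then
                echo "cluster ${cluster_name} did not become ready in time" >&2
                return 1
            fi
            echo -n .
            sleep 10
        done
        echo
    }
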
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jMsksjUTj0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kyL3JEFquY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jMsksjUTj0 ++ cat /tmp/tmp.kyL3JEFquY ++ rm /tmp/tmp.jMsksjUTj0 /tmp/tmp.kyL3JEFquY ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.on39yUL130 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gcMAg74yjC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.on39yUL130 ++ cat /tmp/tmp.gcMAg74yjC ++ rm /tmp/tmp.on39yUL130 /tmp/tmp.gcMAg74yjC ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H4iZnk2h6G +++ mktemp ++ local LAST_ERR=/tmp/tmp.rR5TiRfiOp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H4iZnk2h6G ++ cat /tmp/tmp.rR5TiRfiOp ++ rm /tmp/tmp.H4iZnk2h6G /tmp/tmp.rR5TiRfiOp ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MF7m17KeFE +++ mktemp ++ local LAST_ERR=/tmp/tmp.tDkxz1AHP1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MF7m17KeFE ++ cat /tmp/tmp.tDkxz1AHP1 ++ rm /tmp/tmp.MF7m17KeFE /tmp/tmp.tDkxz1AHP1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3hV0Y7A2Qp +++ mktemp ++ local LAST_ERR=/tmp/tmp.jE78njiTFN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3hV0Y7A2Qp ++ cat /tmp/tmp.jE78njiTFN ++ rm /tmp/tmp.3hV0Y7A2Qp /tmp/tmp.jE78njiTFN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x9PjfTq8o5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FlTAad64lO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x9PjfTq8o5 ++ cat /tmp/tmp.FlTAad64lO ++ rm /tmp/tmp.x9PjfTq8o5 /tmp/tmp.FlTAad64lO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OB6ThBCTSE +++ mktemp ++ local LAST_ERR=/tmp/tmp.IuB0Kf8UmA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OB6ThBCTSE ++ cat /tmp/tmp.IuB0Kf8UmA ++ rm /tmp/tmp.OB6ThBCTSE /tmp/tmp.IuB0Kf8UmA ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XkQK7YKTw1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.b0GBa0HF4Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XkQK7YKTw1 ++ cat /tmp/tmp.b0GBa0HF4Z ++ rm /tmp/tmp.XkQK7YKTw1 /tmp/tmp.b0GBa0HF4Z ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GLPfIWiQqo +++ mktemp ++ local LAST_ERR=/tmp/tmp.WCRranCAhL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GLPfIWiQqo ++ cat /tmp/tmp.WCRranCAhL ++ rm /tmp/tmp.GLPfIWiQqo /tmp/tmp.WCRranCAhL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gsn42ADqJc +++ mktemp ++ local LAST_ERR=/tmp/tmp.18JSsQssTy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gsn42ADqJc ++ cat /tmp/tmp.18JSsQssTy ++ rm /tmp/tmp.Gsn42ADqJc /tmp/tmp.18JSsQssTy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7vJ0QFnvWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.LPC6CIU6Sk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7vJ0QFnvWa ++ cat /tmp/tmp.LPC6CIU6Sk ++ rm /tmp/tmp.7vJ0QFnvWa /tmp/tmp.LPC6CIU6Sk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZZVDGPDqO5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BYAv3LujEo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZZVDGPDqO5 ++ cat /tmp/tmp.BYAv3LujEo ++ rm /tmp/tmp.ZZVDGPDqO5 /tmp/tmp.BYAv3LujEo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . 
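
Every kubectl call in this log goes through the suite's kubectl_bin wrapper, which is why each invocation is bracketed by mktemp for LAST_OUT/LAST_ERR, a seq 0 2 retry loop, set +e/set -e toggles, and cat/rm of the temp files. A minimal re-creation of that wrapper as inferred from the -x trace; the back-off sleep between attempts is an assumption, and the real helper takes an extra flag controlling whether failures are fatal:

    # Run kubectl up to three times, capturing stdout/stderr to temp files so
    # the caller still sees the output of the last attempt.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break                     # success: stop retrying
            fi
            sleep "$timeout"              # assumed pause before the next attempt
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
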
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vU6KVMikZz +++ mktemp ++ local LAST_ERR=/tmp/tmp.UU4ExJWXh4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vU6KVMikZz ++ cat /tmp/tmp.UU4ExJWXh4 ++ rm /tmp/tmp.vU6KVMikZz /tmp/tmp.UU4ExJWXh4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pHxrGXTRrQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.PubEBPqTxb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pHxrGXTRrQ ++ cat /tmp/tmp.PubEBPqTxb ++ rm /tmp/tmp.pHxrGXTRrQ /tmp/tmp.PubEBPqTxb ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ false == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 -not-base-sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local postfix=-not-base-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2e4tcIQeAh +++ mktemp ++ local LAST_ERR=/tmp/tmp.FAuCLsK1ed ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2e4tcIQeAh ++ cat /tmp/tmp.FAuCLsK1ed ++ rm /tmp/tmp.2e4tcIQeAh /tmp/tmp.FAuCLsK1ed ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.aLSK1IMNH3 ++ mktemp + local LAST_ERR=/tmp/tmp.Q1lSeetbU1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aLSK1IMNH3 + cat /tmp/tmp.Q1lSeetbU1 + rm /tmp/tmp.aLSK1IMNH3 /tmp/tmp.Q1lSeetbU1 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-not-base-sharded.json /tmp/tmp.nCNFgSHZFv/find-not-base-sharded + run_restore backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + log 'drop collection' + set +o xtrace [2025-08-07T09:14:14+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sKz1ndWcIc +++ mktemp ++ local LAST_ERR=/tmp/tmp.QEdbtuMr0q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sKz1ndWcIc ++ cat /tmp/tmp.QEdbtuMr0q ++ rm /tmp/tmp.sKz1ndWcIc /tmp/tmp.QEdbtuMr0q ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.HFl1X24Vwq ++ mktemp + local LAST_ERR=/tmp/tmp.Dk9Z4AXQuV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HFl1X24Vwq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("fe6a1125-ad9d-458d-a00d-c21a0fdb442b") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.Dk9Z4AXQuV + rm /tmp/tmp.HFl1X24Vwq /tmp/tmp.Dk9Z4AXQuV + return 0 + log 'check backup and restore -- backup-minio-sharded' + set +o xtrace [2025-08-07T09:14:16+0000] check backup and restore -- backup-minio-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-sharded/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.v2Dqqpo3Y1 ++ mktemp + local LAST_ERR=/tmp/tmp.cIqdi0hm4y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f 
- + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v2Dqqpo3Y1 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-sharded created + cat /tmp/tmp.cIqdi0hm4y + rm /tmp/tmp.v2Dqqpo3Y1 /tmp/tmp.cIqdi0hm4y + return 0 + run_recovery_check backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-minio-sharded some-name requested 0 3000 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-sharded object to be createdOK Waiting psmdb-restore/restore-backup-minio-sharded to reach state "requested" ...OK after 3 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-20877", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.3pEZa9M7CU ++ mktemp + local LAST_ERR=/tmp/tmp.fbPt9SeI9w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3pEZa9M7CU + cat /tmp/tmp.fbPt9SeI9w + rm /tmp/tmp.3pEZa9M7CU /tmp/tmp.fbPt9SeI9w + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.nCNFgSHZFv/statefulset_some-name-rs0.yml + wait_restore backup-minio-sharded some-name ready 0 3000 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-sharded object to be createdOK Waiting psmdb-restore/restore-backup-minio-sharded to reach state "ready" ..OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.22ejBEkzFy ++ mktemp + local LAST_ERR=/tmp/tmp.POO8qPn3oX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.22ejBEkzFy apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-20877"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod7.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-08-07T08:26:50Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-20877 resourceVersion: "1754558450128367008" uid: 81ff0247-c824-4184-acc6-788b25cecf0b spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret 
insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.21.0 image: perconalab/percona-server-mongodb-operator:main-mongod7.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f conditions: - lastTransitionTime: "2025-08-07T08:26:50Z" status: "True" type: sharding - lastTransitionTime: "2025-08-07T08:26:55Z" status: "True" type: initializing - lastTransitionTime: "2025-08-07T08:28:35Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-08-07T08:41:34Z" message: |- dial: ping mongo: server selection error: context deadline exceeded, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.252.24.39:27017: connect: connection refused }, { Addr: some-name-cfg-1.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.252.25.37:27017: connect: connection refused }, { Addr: some-name-cfg-2.some-name-cfg.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.252.26.56:27017: 
connect: connection refused }, ] } handle ReplicaSetNoPrimary: get standalone mongo client: ping mongo: server selection error: server selection timeout, current topology: { Type: Single, Servers: [{ Addr: some-name-rs0-0.some-name-rs0.demand-backup-incremental-sharded-20877.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp: lookup some-name-rs0-0.some-name-rs0.demand-backup-incremental-sharded-20877.svc.cluster.local on 34.118.224.10:53: no such host }, ] } reason: ErrorReconcile status: "True" type: error host: 34.30.88.110 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod7.0 mongoVersion: 7.0.22-12 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.POO8qPn3oX + rm /tmp/tmp.22ejBEkzFy /tmp/tmp.POO8qPn3oX + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PSihy4hU2k +++ mktemp ++ local LAST_ERR=/tmp/tmp.KBVkPcTIlK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PSihy4hU2k ++ cat /tmp/tmp.KBVkPcTIlK ++ rm /tmp/tmp.PSihy4hU2k /tmp/tmp.KBVkPcTIlK ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BkMdDfkzVw +++ mktemp ++ local LAST_ERR=/tmp/tmp.AVbfV0f9xl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BkMdDfkzVw ++ cat /tmp/tmp.AVbfV0f9xl ++ rm /tmp/tmp.BkMdDfkzVw /tmp/tmp.AVbfV0f9xl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oHsVSYiiRD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jd4T1LFeQ0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oHsVSYiiRD ++ cat /tmp/tmp.Jd4T1LFeQ0 ++ rm /tmp/tmp.oHsVSYiiRD /tmp/tmp.Jd4T1LFeQ0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8uGRVCeGQB +++ mktemp ++ local LAST_ERR=/tmp/tmp.abeZzMYscf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8uGRVCeGQB ++ cat /tmp/tmp.abeZzMYscf ++ rm /tmp/tmp.8uGRVCeGQB /tmp/tmp.abeZzMYscf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . 
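
The restore objects applied earlier in this run (restore-backup-minio-not-base-sharded and restore-backup-minio-sharded) come out of the run_restore helper, which fills the shared template with the backup name and applies it; run_recovery_check then calls wait_restore twice, first for the "requested" state and later for "ready". A simplified reconstruction using the paths from this job; the polling body is an assumption, the real wait_restore also takes cluster, consistency and timeout arguments, and it presumably reads the phase from .status.state as its messages suggest:

    # Template and apply the restore object for a given backup.
    backup_name=backup-minio-sharded
    yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml \
      | sed -e "s/name:/name: restore-${backup_name}/" \
      | sed -e "s/backupName:/backupName: ${backup_name}/" \
      | kubectl apply -f -

    # Poll the psmdb-restore object until it reaches the target state.
    wait_restore() {
        local backup_name=$1 target_state=$2
        until [[ $(kubectl get psmdb-restore "restore-${backup_name}" \
                     -o jsonpath='{.status.state}') == "$target_state" ]]; do
            echo -n .
            sleep 10
        done
        echo OK
    }
    wait_restore "$backup_name" requested
    wait_restore "$backup_name" ready
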
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S1lywN6bvM +++ mktemp ++ local LAST_ERR=/tmp/tmp.xNwrkO5ps3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.S1lywN6bvM ++ cat /tmp/tmp.xNwrkO5ps3 ++ rm /tmp/tmp.S1lywN6bvM /tmp/tmp.xNwrkO5ps3 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Zr9Wg454XO +++ mktemp ++ local LAST_ERR=/tmp/tmp.oPZkIUYrrt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Zr9Wg454XO ++ cat /tmp/tmp.oPZkIUYrrt ++ rm /tmp/tmp.Zr9Wg454XO /tmp/tmp.oPZkIUYrrt ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yrQnTg97h0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LyP6N5YIFi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yrQnTg97h0 ++ cat /tmp/tmp.LyP6N5YIFi ++ rm /tmp/tmp.yrQnTg97h0 /tmp/tmp.LyP6N5YIFi ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c8EnW8LB5l +++ mktemp ++ local LAST_ERR=/tmp/tmp.ADAlxiM8Es ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c8EnW8LB5l ++ cat /tmp/tmp.ADAlxiM8Es ++ rm /tmp/tmp.c8EnW8LB5l /tmp/tmp.ADAlxiM8Es ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xnf9LpNVJ3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FUlHONJV40 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xnf9LpNVJ3 ++ cat /tmp/tmp.FUlHONJV40 ++ rm /tmp/tmp.Xnf9LpNVJ3 /tmp/tmp.FUlHONJV40 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hNGXSTW8Ex +++ mktemp ++ local LAST_ERR=/tmp/tmp.aof1X1ETHo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hNGXSTW8Ex ++ cat /tmp/tmp.aof1X1ETHo ++ rm /tmp/tmp.hNGXSTW8Ex /tmp/tmp.aof1X1ETHo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . 
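
The compare_kubectl steps above dump the live StatefulSet and strip everything environment-specific with a long yq filter before diffing against the checked-in expectation, so the comparison is stable across namespaces and clusters. A condensed sketch that keeps only a few of the del() rules; the full filter, visible in the trace, also removes images, clusterIPs, storage classes, finalizers and the percona.com/*-hash annotations:

    kubectl get -o yaml statefulset/some-name-rs0 \
      | yq eval '
          del(.metadata.managedFields) |
          del(.metadata.resourceVersion) |
          del(.. | select(has("uid")).uid) |
          del(.. | select(has("namespace")).namespace) |
          del(.. | select(has("creationTimestamp")).creationTimestamp) |
          del(.status) |
          (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-20877", "NAME_SPACE")
        ' - >/tmp/statefulset_some-name-rs0.yml
    diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml \
            /tmp/statefulset_some-name-rs0.yml
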
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YKw11MamID +++ mktemp ++ local LAST_ERR=/tmp/tmp.WaatT2B9TQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YKw11MamID ++ cat /tmp/tmp.WaatT2B9TQ ++ rm /tmp/tmp.YKw11MamID /tmp/tmp.WaatT2B9TQ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KP0iRCfVsx +++ mktemp ++ local LAST_ERR=/tmp/tmp.yAImpnp2hr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KP0iRCfVsx ++ cat /tmp/tmp.yAImpnp2hr ++ rm /tmp/tmp.KP0iRCfVsx /tmp/tmp.yAImpnp2hr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.30nJMBJqFD +++ mktemp ++ local LAST_ERR=/tmp/tmp.0bzith9xgR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.30nJMBJqFD ++ cat /tmp/tmp.0bzith9xgR ++ rm /tmp/tmp.30nJMBJqFD /tmp/tmp.0bzith9xgR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hAQ3FuhSTK +++ mktemp ++ local LAST_ERR=/tmp/tmp.l9W6sDeIoK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hAQ3FuhSTK ++ cat /tmp/tmp.l9W6sDeIoK ++ rm /tmp/tmp.hAQ3FuhSTK /tmp/tmp.l9W6sDeIoK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RgQnKJA3sO +++ mktemp ++ local LAST_ERR=/tmp/tmp.4UzaAr3mzX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RgQnKJA3sO ++ cat /tmp/tmp.4UzaAr3mzX ++ rm /tmp/tmp.RgQnKJA3sO /tmp/tmp.4UzaAr3mzX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4UlKLyDFI9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RgUmTURADz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4UlKLyDFI9 ++ cat /tmp/tmp.RgUmTURADz ++ rm /tmp/tmp.4UlKLyDFI9 /tmp/tmp.RgUmTURADz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . 
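
Once the state finally reads "ready", the data check that follows (compare_mongos_cmd find ... -sharded) runs the query inside the psmdb-client pod, filters mongo shell noise, normalises ObjectIds and the numeric namespace suffix in hostnames, and diffs the result against the stored fixture. Roughly, with the filter list abbreviated and grep -E used in place of the obsolescent egrep the suite still calls:

    client_pod=$(kubectl get pods --selector=name=psmdb-client \
                   -o jsonpath='{.items[].metadata.name}')
    kubectl exec "$client_pod" -- bash -c \
        "printf 'use myApp\n db.test.find()\n' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin" \
      | grep -Ev 'I NETWORK|W NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match|Error saving history file' \
      | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
      >/tmp/find-sharded
    diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/find-sharded
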
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s2UHa5XENB +++ mktemp ++ local LAST_ERR=/tmp/tmp.J49eCfxKob ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s2UHa5XENB ++ cat /tmp/tmp.J49eCfxKob ++ rm /tmp/tmp.s2UHa5XENB /tmp/tmp.J49eCfxKob ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.52ci0z4aYu +++ mktemp ++ local LAST_ERR=/tmp/tmp.XbqUb3ZybK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.52ci0z4aYu ++ cat /tmp/tmp.XbqUb3ZybK ++ rm /tmp/tmp.52ci0z4aYu /tmp/tmp.XbqUb3ZybK ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nbgRVtIRg5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WJbqk9vMjS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nbgRVtIRg5 ++ cat /tmp/tmp.WJbqk9vMjS ++ rm /tmp/tmp.nbgRVtIRg5 /tmp/tmp.WJbqk9vMjS ++ return 0 + local client_container=psmdb-client-b9788d8bc-2dj9x + kubectl_bin exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.XDOOOvtlxR ++ mktemp + local LAST_ERR=/tmp/tmp.9fyp0ruDgF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-2dj9x -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-20877.svc.cluster.local:27017/admin '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.XDOOOvtlxR
+ cat /tmp/tmp.9fyp0ruDgF
+ rm /tmp/tmp.XDOOOvtlxR /tmp/tmp.9fyp0ruDgF
+ return 0
+ [[ 0 -eq 0 ]]
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.nCNFgSHZFv/find-sharded
+ destroy demand-backup-incremental-sharded-20877
+ local namespace=demand-backup-incremental-sharded-20877
+ local ignore_logs=true
+ [[ 0 == 1 ]]
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
+++ mktemp
++ local LAST_OUT=/tmp/tmp.FP4s5hEPmh
+++ mktemp
++ local LAST_ERR=/tmp/tmp.VgCyujuHGN
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb-backup --no-headers
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.FP4s5hEPmh
++ cat /tmp/tmp.VgCyujuHGN
++ rm /tmp/tmp.FP4s5hEPmh /tmp/tmp.VgCyujuHGN
++ return 0
+ '[' 5 '!=' 0 ']'
+ kubectl_bin get psmdb-backup
++ mktemp
+ local LAST_OUT=/tmp/tmp.1j8qAta3vs
++ mktemp
+ local LAST_ERR=/tmp/tmp.xpcaEFgWSi
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get psmdb-backup
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1j8qAta3vs
NAME                            CLUSTER     STORAGE      DESTINATION                                                                              TYPE               SIZE     STATUS   COMPLETED   AGE
backup-aws-s3-sharded           some-name   aws-s3       s3://operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:31:40Z      incremental-base   1.80MB   ready    52m         52m
backup-azure-blob-sharded       some-name   azure-blob   azure://operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:30Z   incremental-base   2.96MB   ready    51m         52m
backup-gcp-cs-sharded           some-name   gcp-cs       s3://operator-testing/psmdb-demand-backup-incremental-sharded/2025-08-07T08:32:08Z      incremental-base   2.26MB   ready    51m         52m
backup-minio-not-base-sharded   some-name   minio        s3://operator-testing/2025-08-07T08:33:39Z                                               incremental        2.03MB   ready    50m         50m
backup-minio-sharded            some-name   minio        s3://operator-testing/2025-08-07T08:32:58Z                                               incremental-base   4.12MB   ready    50m         51m
+ cat /tmp/tmp.xpcaEFgWSi
+ rm /tmp/tmp.1j8qAta3vs /tmp/tmp.xpcaEFgWSi
+ return 0
+ kubectl_bin delete psmdb-backup --all
++ mktemp
+ local LAST_OUT=/tmp/tmp.ItaTFGXTWO
++ mktemp
+ local LAST_ERR=/tmp/tmp.MRXVGbrPfz
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete psmdb-backup --all
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ItaTFGXTWO
perconaservermongodbbackup.psmdb.percona.com "backup-aws-s3-sharded" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-azure-blob-sharded" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-gcp-cs-sharded" deleted
perconaservermongodbbackup.psmdb.percona.com "backup-minio-not-base-sharded" deleted
perconaservermongodbbackup.psmdb.percona.com
"backup-minio-sharded" deleted + cat /tmp/tmp.MRXVGbrPfz + rm /tmp/tmp.ItaTFGXTWO /tmp/tmp.MRXVGbrPfz + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.9XdPp3WtDZ ++ mktemp + local LAST_ERR=/tmp/tmp.P81JvWDAiR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9XdPp3WtDZ customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.P81JvWDAiR + rm /tmp/tmp.9XdPp3WtDZ /tmp/tmp.P81JvWDAiR + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.q7WLDupEem ++ mktemp + local LAST_ERR=/tmp/tmp.TZs256Zsuq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q7WLDupEem + cat /tmp/tmp.TZs256Zsuq + rm /tmp/tmp.q7WLDupEem /tmp/tmp.TZs256Zsuq + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.bbvnNYEU6D ++ mktemp + local LAST_ERR=/tmp/tmp.mAYci1PXcm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + 
set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bbvnNYEU6D + cat /tmp/tmp.mAYci1PXcm + rm /tmp/tmp.bbvnNYEU6D /tmp/tmp.mAYci1PXcm + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.NTXwgUu6lp ++ mktemp + local LAST_ERR=/tmp/tmp.zc4UFzhIuC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NTXwgUu6lp + cat /tmp/tmp.zc4UFzhIuC + rm /tmp/tmp.NTXwgUu6lp /tmp/tmp.zc4UFzhIuC + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.79H70Y9oEA ++ mktemp + local LAST_ERR=/tmp/tmp.gcOhHQVtiX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.79H70Y9oEA clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.gcOhHQVtiX + rm /tmp/tmp.79H70Y9oEA /tmp/tmp.gcOhHQVtiX + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.4pJ0L6LQUP ++ mktemp + local LAST_ERR=/tmp/tmp.qQX2f6Dt4A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.4pJ0L6LQUP + cat /tmp/tmp.qQX2f6Dt4A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.4pJ0L6LQUP + cat /tmp/tmp.qQX2f6Dt4A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.4pJ0L6LQUP 
+ cat /tmp/tmp.qQX2f6Dt4A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.4pJ0L6LQUP + cat /tmp/tmp.qQX2f6Dt4A Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.4pJ0L6LQUP /tmp/tmp.qQX2f6Dt4A + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-incremental-sharded-20877 + rm -rf /tmp/tmp.nCNFgSHZFv + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.1JT0HciUo2 ++ mktemp + local LAST_OUT=/tmp/tmp.8WPAkHTCJn ++ mktemp + local LAST_ERR=/tmp/tmp.43QeHw8iZq + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.2qqXKr56K2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-incremental-sharded-20877 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator