Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/logs/demand-backup-incremental-sharded.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + create_infra demand-backup-incremental-sharded-31890 + local ns=demand-backup-incremental-sharded-31890 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.aC9UBwMju6 ++ mktemp + local LAST_ERR=/tmp/tmp.OlTNCIQIJ9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aC9UBwMju6 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.OlTNCIQIJ9 + rm /tmp/tmp.aC9UBwMju6 /tmp/tmp.OlTNCIQIJ9 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19846 backup-aws-s3-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19846 backup-azure-blob-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19846 backup-gcp-cs-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs-sharded patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19846 backup-minio-not-base-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base-sharded patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-sharded-19846 backup-minio-sharded --type=merge -p '{"metadata":{"finalizers":[]}}' 
perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.qv0gpmhIpc ++ mktemp + local LAST_ERR=/tmp/tmp.x9GJq76aZP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qv0gpmhIpc customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.x9GJq76aZP + rm /tmp/tmp.qv0gpmhIpc /tmp/tmp.x9GJq76aZP + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.7DCuRkZiyy ++ mktemp + local LAST_ERR=/tmp/tmp.rTIh4Ijnx2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7DCuRkZiyy + cat /tmp/tmp.rTIh4Ijnx2 + rm /tmp/tmp.7DCuRkZiyy /tmp/tmp.rTIh4Ijnx2 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.pXaZrpYyYe ++ mktemp + local LAST_ERR=/tmp/tmp.iunOzWTdGN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pXaZrpYyYe + cat /tmp/tmp.iunOzWTdGN + rm /tmp/tmp.pXaZrpYyYe /tmp/tmp.iunOzWTdGN + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.JdSFTGQzX0 ++ mktemp + local LAST_ERR=/tmp/tmp.fTX89EtEOO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JdSFTGQzX0 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted 
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.fTX89EtEOO + rm /tmp/tmp.JdSFTGQzX0 /tmp/tmp.fTX89EtEOO + return 0 + check_crd_for_deletion PR-2106-f35e19f6 + local git_tag=PR-2106-f35e19f6 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2106-f35e19f6/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EBlutwtj7n +++ mktemp ++ local LAST_ERR=/tmp/tmp.e7vGawc9Ls ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.EBlutwtj7n ++ cat /tmp/tmp.e7vGawc9Ls Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.EBlutwtj7n ++ cat /tmp/tmp.e7vGawc9Ls Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.EBlutwtj7n ++ cat /tmp/tmp.e7vGawc9Ls Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.EBlutwtj7n ++ cat /tmp/tmp.e7vGawc9Ls Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.EBlutwtj7n /tmp/tmp.e7vGawc9Ls ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' 
+ timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.WYgf8CGOya + local LAST_OUT=/tmp/tmp.KbfBxOCScb ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Zklux5AfsY + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.yT2UA2ntE3 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WYgf8CGOya + cat /tmp/tmp.Zklux5AfsY + rm /tmp/tmp.WYgf8CGOya /tmp/tmp.Zklux5AfsY + return 0 namespace "demand-backup-incremental-sharded-19846" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KbfBxOCScb namespace "psmdb-operator" deleted + cat /tmp/tmp.yT2UA2ntE3 + rm /tmp/tmp.KbfBxOCScb /tmp/tmp.yT2UA2ntE3 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jhjmoA8A28 ++ mktemp + local LAST_ERR=/tmp/tmp.hWV27RNGfT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jhjmoA8A28 + cat /tmp/tmp.hWV27RNGfT + rm /tmp/tmp.jhjmoA8A28 /tmp/tmp.hWV27RNGfT + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Nk5vEuNJlg ++ mktemp + local LAST_ERR=/tmp/tmp.MTuvL5DNFQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nk5vEuNJlg namespace/psmdb-operator created + cat /tmp/tmp.MTuvL5DNFQ + rm /tmp/tmp.Nk5vEuNJlg /tmp/tmp.MTuvL5DNFQ + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4MFBaRB7Lu +++ mktemp ++ local LAST_ERR=/tmp/tmp.FBmRBEMeoE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4MFBaRB7Lu ++ cat 
/tmp/tmp.FBmRBEMeoE ++ rm /tmp/tmp.4MFBaRB7Lu /tmp/tmp.FBmRBEMeoE ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2106-f35e19f6-1-cluster8 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.iSS4E4HvG1 ++ mktemp + local LAST_ERR=/tmp/tmp.A5DxSHA9DW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2106-f35e19f6-1-cluster8 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iSS4E4HvG1 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2106-f35e19f6-1-cluster8" modified. + cat /tmp/tmp.A5DxSHA9DW + rm /tmp/tmp.iSS4E4HvG1 /tmp/tmp.A5DxSHA9DW + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2106-f35e19f6' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2106-f35e19f6 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.PF29VSwNTU ++ mktemp + local LAST_ERR=/tmp/tmp.2V3qdVt7F9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PF29VSwNTU customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.2V3qdVt7F9 + rm /tmp/tmp.PF29VSwNTU /tmp/tmp.2V3qdVt7F9 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.anpDLX90Yi ++ mktemp + local LAST_ERR=/tmp/tmp.YfMvFgwoB3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.anpDLX90Yi clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.YfMvFgwoB3 + rm /tmp/tmp.anpDLX90Yi /tmp/tmp.YfMvFgwoB3 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2106-f35e19f6") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.GoEF3fV9AQ ++ mktemp + local LAST_ERR=/tmp/tmp.zjlL2B3ngy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GoEF3fV9AQ deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.zjlL2B3ngy + rm /tmp/tmp.GoEF3fV9AQ /tmp/tmp.zjlL2B3ngy + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hbq4BQvmHx +++ mktemp ++ local LAST_ERR=/tmp/tmp.9jIYPvB816 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Hbq4BQvmHx ++ cat /tmp/tmp.9jIYPvB816 ++ rm /tmp/tmp.Hbq4BQvmHx /tmp/tmp.9jIYPvB816 ++ return 0 + wait_operator_pod percona-server-mongodb-operator-cf7f9f459-rh6mz + local pod=percona-server-mongodb-operator-cf7f9f459-rh6mz + set +o xtrace waiting for pod/percona-server-mongodb-operator-cf7f9f459-rh6mz to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.BeamTmlOKh +++ mktemp ++ local LAST_ERR=/tmp/tmp.l9NOP3IBrj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BeamTmlOKh ++ cat /tmp/tmp.l9NOP3IBrj ++ rm /tmp/tmp.BeamTmlOKh /tmp/tmp.l9NOP3IBrj ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-cf7f9f459-rh6mz ++ mktemp + local LAST_OUT=/tmp/tmp.q6UVI8m9kL ++ mktemp + local LAST_ERR=/tmp/tmp.ZUtwj8Pmm3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-cf7f9f459-rh6mz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q6UVI8m9kL + cat /tmp/tmp.ZUtwj8Pmm3 + rm /tmp/tmp.q6UVI8m9kL /tmp/tmp.ZUtwj8Pmm3 + return 0 2025-11-10T02:25:12.386Z INFO setup Manager starting up {"gitCommit": "f35e19f64f8e7b98f6f4de5edd18b37a6eb22bbb", "gitBranch": "PR-2106-f35e19f6", "buildTime": "", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-incremental-sharded-31890 + local namespace=demand-backup-incremental-sharded-31890 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get 
MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces demand-backup-incremental-sharded-31890' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-incremental-sharded-31890 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-incremental-sharded-31890 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.4MKUbHL3tn egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.p88xlwsY7s ++ mktemp + local LAST_ERR=/tmp/tmp.HFvJBcZNBo + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.AtiCLuJsXy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-incremental-sharded-31890 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4MKUbHL3tn + cat /tmp/tmp.HFvJBcZNBo + rm /tmp/tmp.4MKUbHL3tn /tmp/tmp.HFvJBcZNBo + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p88xlwsY7s + cat /tmp/tmp.AtiCLuJsXy + rm /tmp/tmp.p88xlwsY7s /tmp/tmp.AtiCLuJsXy + return 0 + kubectl_bin wait --for=delete namespace demand-backup-incremental-sharded-31890 ++ mktemp + local LAST_OUT=/tmp/tmp.RYt1Hu987o ++ mktemp + local LAST_ERR=/tmp/tmp.jHNLNaaTDg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-incremental-sharded-31890 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RYt1Hu987o + cat /tmp/tmp.jHNLNaaTDg + rm /tmp/tmp.RYt1Hu987o /tmp/tmp.jHNLNaaTDg + return 0 + desc 'create namespace demand-backup-incremental-sharded-31890' + set +o xtrace 
-----------------------------------------------------------------------------------
create namespace demand-backup-incremental-sharded-31890
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace demand-backup-incremental-sharded-31890
++ mktemp
+ local LAST_OUT=/tmp/tmp.tk7tRJNQBQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.RIpef4CAU5
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace demand-backup-incremental-sharded-31890
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.tk7tRJNQBQ
namespace/demand-backup-incremental-sharded-31890 created
+ cat /tmp/tmp.RIpef4CAU5
+ rm /tmp/tmp.tk7tRJNQBQ /tmp/tmp.RIpef4CAU5
+ return 0
+ set_kube_ctx demand-backup-incremental-sharded-31890
+ local namespace=demand-backup-incremental-sharded-31890
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.LfiNusP8iQ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.i1oDfMUYfT
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.LfiNusP8iQ
++ cat /tmp/tmp.i1oDfMUYfT
++ rm /tmp/tmp.LfiNusP8iQ /tmp/tmp.i1oDfMUYfT
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2106-f35e19f6-1-cluster8 --namespace=demand-backup-incremental-sharded-31890
++ mktemp
+ local LAST_OUT=/tmp/tmp.wRpaV4p7Vm
++ mktemp
+ local LAST_ERR=/tmp/tmp.zJVWwvS2ps
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2106-f35e19f6-1-cluster8 --namespace=demand-backup-incremental-sharded-31890
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wRpaV4p7Vm
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2106-f35e19f6-1-cluster8" modified.
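-----------------------------------------------------------------------------------
editor's note: kubectl_bin retry wrapper (reconstructed sketch)
-----------------------------------------------------------------------------------
The repeated mktemp / LAST_OUT / LAST_ERR / seq 0 2 runs throughout this trace come
from the suite's kubectl_bin helper, which retries kubectl up to three times and
captures stdout/stderr in temp files. Below is a minimal sketch reconstructed from
the trace itself (variable names as they appear in the log; the real helper in the
repo's e2e-tests may differ in details such as the exact retry condition):

kubectl_bin() {
    local LAST_OUT LAST_ERR
    LAST_OUT=$(mktemp)                  # per-attempt stdout capture
    LAST_ERR=$(mktemp)                  # per-attempt stderr capture
    local exit_status=0
    local timeout=4
    for i in $(seq 0 2); do             # up to 3 attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"             # replay output of the failed attempt
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))      # observed back-off: 0s, 4s, 8s
        else
            break
        fi
    done
    cat "$LAST_OUT"                     # replay captured stdout
    cat "$LAST_ERR" >&2                 # replay captured stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}
-----------------------------------------------------------------------------------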
+ cat /tmp/tmp.zJVWwvS2ps + rm /tmp/tmp.wRpaV4p7Vm /tmp/tmp.zJVWwvS2ps + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Mon Nov 10 02:25:50 2025 NAMESPACE: demand-backup-incremental-sharded-31890 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-incremental-sharded-31890.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-incremental-sharded-31890 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-incremental-sharded-31890 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-incremental-sharded-31890 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-incremental-sharded-31890 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aPdOAdqmoe +++ mktemp ++ local LAST_ERR=/tmp/tmp.PyfQOEs8G1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aPdOAdqmoe ++ cat /tmp/tmp.PyfQOEs8G1 ++ rm /tmp/tmp.aPdOAdqmoe /tmp/tmp.PyfQOEs8G1 ++ return 0 + MINIO_POD=minio-service-d9589b474-gwwbg + wait_pod minio-service-d9589b474-gwwbg + local pod=minio-service-d9589b474-gwwbg + set +o xtrace waiting for pod/minio-service-d9589b474-gwwbg to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-sharded-31890.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.svOCh9iTDj ++ mktemp + local LAST_ERR=/tmp/tmp.7ekpFmwlpQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-sharded-31890.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.svOCh9iTDj service/minio-service created + cat /tmp/tmp.7ekpFmwlpQ + rm /tmp/tmp.svOCh9iTDj /tmp/tmp.7ekpFmwlpQ + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.x5iL25pkkY ++ mktemp + local LAST_ERR=/tmp/tmp.4EMiSRaamv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x5iL25pkkY make_bucket: operator-testing pod "aws-cli" deleted from demand-backup-incremental-sharded-31890 namespace + cat /tmp/tmp.4EMiSRaamv + rm /tmp/tmp.x5iL25pkkY /tmp/tmp.4EMiSRaamv + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.uj2Kosh1gc ++ mktemp + local LAST_ERR=/tmp/tmp.9AeqMhGNr8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uj2Kosh1gc secret/minio-secret created 
secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.9AeqMhGNr8 + rm /tmp/tmp.uj2Kosh1gc /tmp/tmp.9AeqMhGNr8 + return 0 + desc 'Testing on sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on sharded cluster ----------------------------------------------------------------------------------- + log 'Creating PSMDB cluster' + set +o xtrace [2025-11-10T02:26:29+0000] Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.HP8qgIqRYJ ++ mktemp + local LAST_ERR=/tmp/tmp.ArKQq2bTYa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HP8qgIqRYJ secret/some-users created + cat /tmp/tmp.ArKQq2bTYa + rm /tmp/tmp.HP8qgIqRYJ /tmp/tmp.ArKQq2bTYa + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/some-name-sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/some-name-sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/some-name-sharded.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2106-f35e19f6"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.Gx1HwEiAIR ++ mktemp + local LAST_ERR=/tmp/tmp.tl07MZ80TT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Gx1HwEiAIR perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.tl07MZ80TT + rm /tmp/tmp.Gx1HwEiAIR /tmp/tmp.tl07MZ80TT + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.IuNant6cBV ++ mktemp + local LAST_ERR=/tmp/tmp.Ufgi7zE7wl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IuNant6cBV deployment.apps/psmdb-client created + cat /tmp/tmp.Ufgi7zE7wl + rm /tmp/tmp.IuNant6cBV /tmp/tmp.Ufgi7zE7wl + return 0 + log 'check if all pods started' + set +o xtrace [2025-11-10T02:26:34+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 
to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GY5vypy69H +++ mktemp ++ local LAST_ERR=/tmp/tmp.nopsTXnlSw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GY5vypy69H ++ cat /tmp/tmp.nopsTXnlSw ++ rm /tmp/tmp.GY5vypy69H /tmp/tmp.nopsTXnlSw ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vZfFOF2z7j +++ mktemp ++ local LAST_ERR=/tmp/tmp.F0v10nE2EW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vZfFOF2z7j ++ cat /tmp/tmp.F0v10nE2EW ++ rm /tmp/tmp.vZfFOF2z7j /tmp/tmp.F0v10nE2EW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ziolJR97Et +++ mktemp ++ local LAST_ERR=/tmp/tmp.pUkjHomODJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ziolJR97Et ++ cat /tmp/tmp.pUkjHomODJ ++ rm /tmp/tmp.ziolJR97Et /tmp/tmp.pUkjHomODJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................... 
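-----------------------------------------------------------------------------------
editor's note: cluster readiness polling (sketch)
-----------------------------------------------------------------------------------
The dotted "Waiting for cluster readyness" lines are produced by the suite's wait
helpers: wait_for_running waits on each pod (some-name-rs0-0..2, checking the
arbiter/nonvoting/hidden flags via jsonpath), then the psmdb resource is polled
until .status.state reports ready. A minimal sketch of that polling loop, using
the jsonpath query visible in this trace (retry limit and 7-second interval taken
from the wait_cluster_consistency values shown further below; simplified from the
real helper):

wait_cluster_consistency() {
    local cluster_name=$1
    local wait_time=${2:-32}            # max retries seen in the trace
    local retry=0
    echo -n 'waiting for cluster readyness'
    until [[ $(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
        retry=$((retry + 1))
        if [[ $retry -gt $wait_time ]]; then
            echo "cluster $cluster_name did not reach ready state" >&2
            return 1
        fi
        echo -n .
        sleep 7                         # poll interval seen in the trace
    done
    echo .OK
}

The cfg and mongos waits that follow run the equivalent of this loop after their
per-pod checks, e.g. wait_cluster_consistency some-name.
-----------------------------------------------------------------------------------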
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.R2luvE9tuW +++ mktemp ++ local LAST_ERR=/tmp/tmp.CVJ17qDBYj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.R2luvE9tuW ++ cat /tmp/tmp.CVJ17qDBYj ++ rm /tmp/tmp.R2luvE9tuW /tmp/tmp.CVJ17qDBYj ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6auYusiISb +++ mktemp ++ local LAST_ERR=/tmp/tmp.BK4Qty31b2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6auYusiISb ++ cat /tmp/tmp.BK4Qty31b2 ++ rm /tmp/tmp.6auYusiISb /tmp/tmp.BK4Qty31b2 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b1b5jUmaMk +++ mktemp ++ local LAST_ERR=/tmp/tmp.QAzJ1VZQnK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b1b5jUmaMk ++ cat /tmp/tmp.QAzJ1VZQnK ++ rm /tmp/tmp.b1b5jUmaMk /tmp/tmp.QAzJ1VZQnK ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MAXi5250TO +++ mktemp ++ local LAST_ERR=/tmp/tmp.5mRKZ7uXiW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MAXi5250TO ++ cat /tmp/tmp.5mRKZ7uXiW ++ rm /tmp/tmp.MAXi5250TO /tmp/tmp.5mRKZ7uXiW ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2Jlum3zgdx +++ mktemp ++ local LAST_ERR=/tmp/tmp.5M8FujwVVW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2Jlum3zgdx ++ cat /tmp/tmp.5M8FujwVVW ++ rm /tmp/tmp.2Jlum3zgdx /tmp/tmp.5M8FujwVVW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qa3t2dSO6m +++ mktemp ++ local LAST_ERR=/tmp/tmp.fc9pdqT6eJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qa3t2dSO6m ++ cat /tmp/tmp.fc9pdqT6eJ ++ rm /tmp/tmp.Qa3t2dSO6m /tmp/tmp.fc9pdqT6eJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.biXuNGD331 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ueviUCiuGk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.biXuNGD331 ++ cat /tmp/tmp.ueviUCiuGk ++ rm /tmp/tmp.biXuNGD331 /tmp/tmp.ueviUCiuGk ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK ++ kubectl_bin get svc some-name-mongos '-o=jsonpath={.status}' ++ jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0] | if .ip then .ip else .hostname end' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bQ0re5Q5i3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZNrdpCgjs9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get svc some-name-mongos '-o=jsonpath={.status}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bQ0re5Q5i3 ++ cat /tmp/tmp.ZNrdpCgjs9 ++ rm /tmp/tmp.bQ0re5Q5i3 /tmp/tmp.ZNrdpCgjs9 ++ return 0 + lbEndpoint=34.46.144.11 + '[' -z 34.46.144.11 ']' + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ 
-z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VaJJFkD5AX +++ mktemp ++ local LAST_ERR=/tmp/tmp.brU7spEOKc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VaJJFkD5AX ++ cat /tmp/tmp.brU7spEOKc ++ rm /tmp/tmp.VaJJFkD5AX /tmp/tmp.brU7spEOKc ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.6ceJhUhEj9 ++ mktemp + local LAST_ERR=/tmp/tmp.FqTw17iAmP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6ceJhUhEj9 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("1b3d88c5-8fb1-49a8-83f1-4c556f816992") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.FqTw17iAmP + rm /tmp/tmp.6ceJhUhEj9 /tmp/tmp.FqTw17iAmP + return 0 + sleep 1 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xdiyeRwB8q +++ mktemp ++ local LAST_ERR=/tmp/tmp.MZI9QvZKRH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xdiyeRwB8q ++ cat /tmp/tmp.MZI9QvZKRH ++ rm /tmp/tmp.xdiyeRwB8q /tmp/tmp.MZI9QvZKRH ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.qaDjfovK6O ++ mktemp + local LAST_ERR=/tmp/tmp.hovp3wjYtj + local exit_status=0 + local timeout=4 ++ seq 0 2 + 
for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qaDjfovK6O Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("9ff094c8-6b33-48d6-8d92-2ba454664076") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.hovp3wjYtj + rm /tmp/tmp.qaDjfovK6O /tmp/tmp.hovp3wjYtj + return 0 + sleep 5 + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ awk -F: '{print $2}' ++ echo .svc.cluster.local egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ROsvoYziYI +++ mktemp ++ local LAST_ERR=/tmp/tmp.1JvAaH7xZV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ROsvoYziYI ++ cat /tmp/tmp.1JvAaH7xZV ++ rm /tmp/tmp.ROsvoYziYI /tmp/tmp.1JvAaH7xZV ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.fljXwfDckJ ++ mktemp + local LAST_ERR=/tmp/tmp.UDWDx27QJs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fljXwfDckJ + cat /tmp/tmp.UDWDx27QJs + rm /tmp/tmp.fljXwfDckJ /tmp/tmp.UDWDx27QJs + return 0 + [[ 0 -eq 0 ]] + diff 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.Ncy5tVg2lu/find-sharded + log 'waiting 60 seconds for stable timestamp in wiredtiger' + set +o xtrace [2025-11-10T02:29:36+0000] waiting 60 seconds for stable timestamp in wiredtiger + sleep 80 + log 'running backups' + set +o xtrace [2025-11-10T02:30:56+0000] running backups + '[' -z '' ']' + backup_name_aws=backup-aws-s3-sharded + backup_name_gcp=backup-gcp-cs-sharded + backup_name_azure=backup-azure-blob-sharded + run_backup aws-s3 backup-aws-s3-sharded + local storage=aws-s3 + local backup_name=backup-aws-s3-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-aws-s3-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: aws-s3/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.GKqn0wr0AA ++ mktemp + local LAST_ERR=/tmp/tmp.fhvESiptaZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GKqn0wr0AA perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3-sharded created + cat /tmp/tmp.fhvESiptaZ + rm /tmp/tmp.GKqn0wr0AA /tmp/tmp.fhvESiptaZ + return 0 + run_backup gcp-cs backup-gcp-cs-sharded + local storage=gcp-cs + local backup_name=backup-gcp-cs-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-gcp-cs-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: gcp-cs/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.rDcuGqXBJT ++ mktemp + local LAST_ERR=/tmp/tmp.xHYod9C9YZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rDcuGqXBJT perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs-sharded created + cat /tmp/tmp.xHYod9C9YZ + rm /tmp/tmp.rDcuGqXBJT /tmp/tmp.xHYod9C9YZ + return 0 + run_backup azure-blob backup-azure-blob-sharded + local storage=azure-blob + local backup_name=backup-azure-blob-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-azure-blob-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: azure-blob/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.avpgIbaqOK ++ mktemp + local LAST_ERR=/tmp/tmp.lCu82gJ5J2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.avpgIbaqOK perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob-sharded created + cat /tmp/tmp.lCu82gJ5J2 + rm /tmp/tmp.avpgIbaqOK /tmp/tmp.lCu82gJ5J2 + return 0 + wait_backup backup-aws-s3-sharded + local backup_name=backup-aws-s3-sharded + local target_state=ready + set +o xtrace waiting for 
backup-aws-s3-sharded to reach ready state..................OK + check_backup_in_storage backup-aws-s3-sharded s3 rs0 + local backup=backup-aws-s3-sharded + local storage_type=s3 + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=s3.amazonaws.com ++ get_backup_dest backup-aws-s3-sharded ++ local backup_name=backup-aws-s3-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|azure://||' +++ mktemp ++ sed 's|s3://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.sIPBXX8SkC +++ mktemp ++ local LAST_ERR=/tmp/tmp.irYV0m9ptP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sIPBXX8SkC ++ cat /tmp/tmp.irYV0m9ptP ++ rm /tmp/tmp.sIPBXX8SkC /tmp/tmp.irYV0m9ptP ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z + [[ s3 == \m\i\n\i\o ]] + local url=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/rs0/filelist.pbm + log 'checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/rs0/filelist.pbm exists' + set +o xtrace [2025-11-10T02:31:36+0000] checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/rs0/filelist.pbm exists + curl --fail --head https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 9126 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK x-amz-id-2: ZNQ7HCMSboZf4cgQNdCuq4w2Vz0v2lpw99CicMKn8NB1RFTAexgt37fk8wIumSaPMvQDUqtJ+aIjwfiINUvT4B2paLAgLLUTxg+sHRfYFr8= x-amz-request-id: RMZVYN07JX0F6QC1 Date: Mon, 10 Nov 2025 02:31:38 GMT Last-Modified: Mon, 10 Nov 2025 02:31:24 GMT x-amz-expiration: expiry-date="Wed, 12 Nov 2025 00:00:00 GMT", rule-id="1 Days Cleanup" ETag: "64c37182bedb2126aaf20f7ecde488c0" x-amz-server-side-encryption: AES256 Accept-Ranges: bytes Content-Type: application/octet-stream Content-Length: 9126 Server: AmazonS3 + check_backup_in_storage backup-aws-s3-sharded s3 cfg + local backup=backup-aws-s3-sharded + local storage_type=s3 + local replset=cfg + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=s3.amazonaws.com ++ get_backup_dest backup-aws-s3-sharded ++ local backup_name=backup-aws-s3-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' ++ sed 's|gs://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ffPJ2jliBA +++ mktemp ++ local LAST_ERR=/tmp/tmp.RzJtidVNOE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-aws-s3-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ffPJ2jliBA ++ cat /tmp/tmp.RzJtidVNOE ++ rm /tmp/tmp.ffPJ2jliBA /tmp/tmp.RzJtidVNOE ++ return 0 + 
backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z + [[ s3 == \m\i\n\i\o ]] + local url=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/cfg/filelist.pbm + log 'checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/cfg/filelist.pbm exists' + set +o xtrace [2025-11-10T02:31:38+0000] checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/cfg/filelist.pbm exists + curl --fail --head https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z/cfg/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 17484 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK x-amz-id-2: i0h1VAu8tzFuWp1RHYH9www16dCLGSw4xmL3xSjM4oJUdJdy8Xku1Sj//fJpHAu+uv18geUfeGE= x-amz-request-id: GDACETG90BDZYBRC Date: Mon, 10 Nov 2025 02:31:39 GMT Last-Modified: Mon, 10 Nov 2025 02:31:31 GMT x-amz-expiration: expiry-date="Wed, 12 Nov 2025 00:00:00 GMT", rule-id="1 Days Cleanup" ETag: "ee3b1e23d847eacf8baec15e8a406415" x-amz-server-side-encryption: AES256 Accept-Ranges: bytes Content-Type: application/octet-stream Content-Length: 17484 Server: AmazonS3 + wait_backup backup-gcp-cs-sharded + local backup_name=backup-gcp-cs-sharded + local target_state=ready + set +o xtrace waiting for backup-gcp-cs-sharded to reach ready state.............OK + check_backup_in_storage backup-gcp-cs-sharded gcs rs0 + local backup=backup-gcp-cs-sharded + local storage_type=gcs + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=storage.googleapis.com ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ get_backup_dest backup-gcp-cs-sharded ++ local backup_name=backup-gcp-cs-sharded ++ kubectl_bin get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.MRHI378sSi +++ mktemp ++ local LAST_ERR=/tmp/tmp.eKflSCEx6Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MRHI378sSi ++ cat /tmp/tmp.eKflSCEx6Q ++ rm /tmp/tmp.MRHI378sSi /tmp/tmp.eKflSCEx6Q ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z + [[ gcs == \m\i\n\i\o ]] + local url=https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/rs0/filelist.pbm + log 'checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/rs0/filelist.pbm exists' + set +o xtrace [2025-11-10T02:32:04+0000] checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/rs0/filelist.pbm exists + curl --fail --head https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 9264 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/2 200 
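For the public cloud storages the existence check is a plain HTTP HEAD: pick the endpoint by storage type, join it with the destination, the replset name, and filelist.pbm, and let curl --fail turn any HTTP error into a non-zero exit. A sketch of check_backup_in_storage as traced (the minio branch is handled separately, further down in the log):

  check_backup_in_storage() {
      local backup=$1 storage_type=$2 replset=$3
      local file=filelist.pbm
      local endpoint
      case ${storage_type} in
          s3)    endpoint=s3.amazonaws.com ;;
          gcs)   endpoint=storage.googleapis.com ;;
          azure) endpoint=engk8soperators.blob.core.windows.net ;;
      esac
      local url="https://${endpoint}/$(get_backup_dest "${backup}")/${replset}/${file}"
      echo "checking if ${url} exists"
      curl --fail --head "${url}"
  }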
content-type: application/octet-stream x-guploader-uploadid: AOCedOFzKabPe-3CiIF0o4jyu9s_9hgE_gr5z9lQZU5ZleNwqClRL_VARCdDSXuSvLR44g32 expires: Mon, 10 Nov 2025 03:32:04 GMT date: Mon, 10 Nov 2025 02:32:04 GMT cache-control: public, max-age=3600 last-modified: Mon, 10 Nov 2025 02:31:56 GMT etag: "fdb9cc9767e6e40f88b06b46920ebc0a" x-goog-generation: 1762741916456487 x-goog-metageneration: 1 x-goog-stored-content-encoding: identity x-goog-stored-content-length: 9264 x-goog-hash: crc32c=kMkl/w== x-goog-hash: md5=/bnMl2fm5A+IsGtGkg68Cg== x-goog-expiration: Tue, 11 Nov 2025 02:31:56 GMT x-goog-storage-class: REGIONAL accept-ranges: bytes content-length: 9264 server: UploadServer alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + check_backup_in_storage backup-gcp-cs-sharded gcs cfg + local backup=backup-gcp-cs-sharded + local storage_type=gcs + local replset=cfg + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=storage.googleapis.com ++ get_backup_dest backup-gcp-cs-sharded ++ local backup_name=backup-gcp-cs-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.kYTTLzes1L +++ mktemp ++ local LAST_ERR=/tmp/tmp.8bkczqXyKG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kYTTLzes1L ++ cat /tmp/tmp.8bkczqXyKG ++ rm /tmp/tmp.kYTTLzes1L /tmp/tmp.8bkczqXyKG ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z + [[ gcs == \m\i\n\i\o ]] + local url=https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/cfg/filelist.pbm + log 'checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/cfg/filelist.pbm exists' + set +o xtrace [2025-11-10T02:32:05+0000] checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/cfg/filelist.pbm exists + curl --fail --head https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z/cfg/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 17484 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/2 200 content-type: application/octet-stream x-guploader-uploadid: AOCedOGRAORD7MfrUkvwzJU8QluLbFhWnQXvEi4LPq9bHIom1OB0ibV8KNoDCl2HVmtDoeukSsHXc-s expires: Mon, 10 Nov 2025 03:32:05 GMT date: Mon, 10 Nov 2025 02:32:05 GMT cache-control: public, max-age=3600 last-modified: Mon, 10 Nov 2025 02:31:56 GMT etag: "8c9848412475df501d17b25f8fdbc539" x-goog-generation: 1762741916630505 x-goog-metageneration: 1 x-goog-stored-content-encoding: identity x-goog-stored-content-length: 17484 x-goog-hash: crc32c=ZlBAZA== x-goog-hash: md5=jJhIQSR131AdF7Jfj9vFOQ== x-goog-expiration: Tue, 11 Nov 2025 02:31:56 GMT x-goog-storage-class: REGIONAL accept-ranges: bytes content-length: 17484 server: UploadServer alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + wait_backup backup-azure-blob-sharded + local backup_name=backup-azure-blob-sharded + local 
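Every kubectl call in this log runs through the kubectl_bin wrapper: stdout and stderr are captured into mktemp files, the call is retried up to three times, and the captured output is replayed afterwards. Only the success path is visible in this run, so the back-off in the failure branch below is an assumption:

  kubectl_bin() {
      local LAST_OUT LAST_ERR exit_status=0 timeout=4
      LAST_OUT=$(mktemp)
      LAST_ERR=$(mktemp)
      for i in $(seq 0 2); do
          set +e
          kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
          exit_status=$?
          set -e
          [ ${exit_status} -eq 0 ] && break
          sleep ${timeout}             # assumed retry back-off; never reached in this run
      done
      cat "${LAST_OUT}"
      cat "${LAST_ERR}" >&2
      rm -f "${LAST_OUT}" "${LAST_ERR}"
      return ${exit_status}
  }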
target_state=ready + set +o xtrace waiting for backup-azure-blob-sharded to reach ready state................OK + check_backup_in_storage backup-azure-blob-sharded azure rs0 + local backup=backup-azure-blob-sharded + local storage_type=azure + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net ++ get_backup_dest backup-azure-blob-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ local backup_name=backup-azure-blob-sharded ++ kubectl_bin get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ sed 's|s3://||' ++ local LAST_OUT=/tmp/tmp.tF5IHbizHo +++ mktemp ++ local LAST_ERR=/tmp/tmp.nFf1ESK7K2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tF5IHbizHo ++ cat /tmp/tmp.nFf1ESK7K2 ++ rm /tmp/tmp.tF5IHbizHo /tmp/tmp.nFf1ESK7K2 ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z + [[ azure == \m\i\n\i\o ]] + local url=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/rs0/filelist.pbm + log 'checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/rs0/filelist.pbm exists' + set +o xtrace [2025-11-10T02:32:38+0000] checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/rs0/filelist.pbm exists + curl --fail --head https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 11156 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK Content-Length: 11156 Content-Type: application/octet-stream Content-MD5: zBvYk0r4C1tlcPZPTUhrJQ== Last-Modified: Mon, 10 Nov 2025 02:32:22 GMT ETag: 0x8DE200160B38103 Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 x-ms-request-id: 0ea4b60d-901e-0034-5bea-51eca7000000 x-ms-version: 2009-09-19 x-ms-lease-status: unlocked x-ms-blob-type: BlockBlob Date: Mon, 10 Nov 2025 02:32:38 GMT + check_backup_in_storage backup-azure-blob-sharded azure cfg + local backup=backup-azure-blob-sharded + local storage_type=azure + local replset=cfg + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net ++ get_backup_dest backup-azure-blob-sharded ++ local backup_name=backup-azure-blob-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HfqTffY3IJ +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_ERR=/tmp/tmp.BX3nBEKL1X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-azure-blob-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 
']' ++ break ++ cat /tmp/tmp.HfqTffY3IJ ++ cat /tmp/tmp.BX3nBEKL1X ++ rm /tmp/tmp.HfqTffY3IJ /tmp/tmp.BX3nBEKL1X ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z + [[ azure == \m\i\n\i\o ]] + local url=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/cfg/filelist.pbm + log 'checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/cfg/filelist.pbm exists' + set +o xtrace [2025-11-10T02:32:39+0000] checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/cfg/filelist.pbm exists + curl --fail --head https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z/cfg/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 18309 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK Content-Length: 18309 Content-Type: application/octet-stream Content-MD5: T4G0ndZsO/JNgDj/6/x9vg== Last-Modified: Mon, 10 Nov 2025 02:32:30 GMT ETag: 0x8DE200165DDD78A Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 x-ms-request-id: 97334212-401e-0018-09ea-510008000000 x-ms-version: 2009-09-19 x-ms-lease-status: unlocked x-ms-blob-type: BlockBlob Date: Mon, 10 Nov 2025 02:32:40 GMT + backup_name_minio=backup-minio-sharded + run_backup minio backup-minio-sharded + local storage=minio + local backup_name=backup-minio-sharded + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.UO0m7cwsGx ++ mktemp + local LAST_ERR=/tmp/tmp.pgsAZSmTYb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UO0m7cwsGx perconaservermongodbbackup.psmdb.percona.com/backup-minio-sharded created + cat /tmp/tmp.pgsAZSmTYb + rm /tmp/tmp.UO0m7cwsGx /tmp/tmp.pgsAZSmTYb + return 0 + wait_backup backup-minio-sharded + local backup_name=backup-minio-sharded + local target_state=ready + set +o xtrace waiting for backup-minio-sharded to reach ready state............OK + check_backup_in_storage backup-minio-sharded minio rs0 + local backup=backup-minio-sharded + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio-sharded ++ local backup_name=backup-minio-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ sed -e 's/.json$//' ++ kubectl_bin get psmdb-backup backup-minio-sharded -o 'jsonpath={.status.destination}' ++ sed 's|azure://||' ++ sed 's|s3://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.J2LdDhDuMD +++ mktemp ++ local LAST_ERR=/tmp/tmp.FxSCE4yocd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-sharded -o 
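wait_backup suppresses xtrace and polls the psmdb-backup CR, printing one dot per attempt until it reports the target state. The loop body is hidden by set +o xtrace, so this sketch is reconstructed from the printed output only; the real helper presumably also bails out on error states and enforces a timeout:

  wait_backup() {
      local backup_name=$1
      local target_state=${2:-ready}
      echo -n "waiting for ${backup_name} to reach ${target_state} state"
      local state=""
      until [[ ${state} == "${target_state}" ]]; do
          sleep 1
          state=$(kubectl get psmdb-backup "${backup_name}" -o 'jsonpath={.status.state}')
          echo -n .
      done
      echo OK
  }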
'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J2LdDhDuMD ++ cat /tmp/tmp.FxSCE4yocd ++ rm /tmp/tmp.J2LdDhDuMD /tmp/tmp.FxSCE4yocd ++ return 0 + backup_dest=operator-testing/2025-11-10T02:32:42Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-11-10T02:32:42Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.tQgUs9XxcK ++ mktemp + local LAST_ERR=/tmp/tmp.zRAlnIkRNr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-11-10T02:32:42Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tQgUs9XxcK + cat /tmp/tmp.zRAlnIkRNr + rm /tmp/tmp.tQgUs9XxcK /tmp/tmp.zRAlnIkRNr + return 0 2025-11-10 02:33:01 11156 filelist.pbm + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4MHVOjnTCr +++ mktemp ++ local LAST_ERR=/tmp/tmp.wjUNvgbOqL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4MHVOjnTCr ++ cat /tmp/tmp.wjUNvgbOqL ++ rm /tmp/tmp.4MHVOjnTCr /tmp/tmp.wjUNvgbOqL ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.mQkWZOVGB8 ++ mktemp + local LAST_ERR=/tmp/tmp.CDe0cbPUAj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mQkWZOVGB8 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("1ef4383a-8679-43fc-854e-e795e9cd43ed") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to 
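minio is only reachable inside the cluster, so instead of curl the check launches a throwaway aws-cli pod and lists the key through the minio service, with the credentials and image exactly as traced (get_backup_dest as sketched earlier):

  kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
      /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key \
      AWS_SECRET_ACCESS_KEY=some-secret-key \
      AWS_DEFAULT_REGION=us-east-1 \
      /usr/bin/aws --endpoint-url http://minio-service:9000 \
      s3 ls "s3://$(get_backup_dest backup-minio-sharded)/rs0/filelist.pbm" \
      | grep filelist.pbm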
db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.CDe0cbPUAj + rm /tmp/tmp.mQkWZOVGB8 /tmp/tmp.CDe0cbPUAj + return 0 + sleep 5 + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 -not-base-sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local postfix=-not-base-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lOcz378om2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kEHkSxrhJf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lOcz378om2 ++ cat /tmp/tmp.kEHkSxrhJf ++ rm /tmp/tmp.lOcz378om2 /tmp/tmp.kEHkSxrhJf ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.65JGWkauTi ++ mktemp + local LAST_ERR=/tmp/tmp.u0n478B8tq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.65JGWkauTi + cat /tmp/tmp.u0n478B8tq + rm /tmp/tmp.65JGWkauTi /tmp/tmp.u0n478B8tq + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/find-not-base-sharded.json /tmp/tmp.Ncy5tVg2lu/find-not-base-sharded + backup_name_minio_not_base=backup-minio-not-base-sharded + run_backup minio backup-minio-not-base-sharded false + local storage=minio + local backup_name=backup-minio-not-base-sharded + local base=false + local backup_type=incremental + [[ false == \t\r\u\e ]] + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-not-base-sharded/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental"' + kubectl_bin apply -f - ++ mktemp 
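run_mongos is how every shell statement above reaches the cluster: the command is printf'ed into a mongo shell inside the long-lived psmdb-client pod, pointed at the mongos service of the test namespace. A simplified sketch, with the retry wrapper and extra flag handling elided:

  run_mongos() {
      local command=$1                 # e.g. 'use myApp\n db.test.insert({ x: 100502 })'
      local uri=$2                     # e.g. myApp:myPass@some-name-mongos.<namespace>
      local suffix=.svc.cluster.local:27017
      local client_container
      client_container=$(kubectl get pods --selector=name=psmdb-client \
          -o 'jsonpath={.items[].metadata.name}')
      kubectl exec "${client_container}" -- bash -c \
          "printf '${command}\n' | mongo mongodb://${uri}${suffix}/admin"
  }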
+ local LAST_OUT=/tmp/tmp.j4AqJid0Rj ++ mktemp + local LAST_ERR=/tmp/tmp.AoebrAxAYl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j4AqJid0Rj perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base-sharded created + cat /tmp/tmp.AoebrAxAYl + rm /tmp/tmp.j4AqJid0Rj /tmp/tmp.AoebrAxAYl + return 0 + wait_backup backup-minio-not-base-sharded + local backup_name=backup-minio-not-base-sharded + local target_state=ready + set +o xtrace waiting for backup-minio-not-base-sharded to reach ready state......OK + check_backup_in_storage backup-minio-not-base-sharded minio rs0 + local backup=backup-minio-not-base-sharded + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio-not-base-sharded ++ local backup_name=backup-minio-not-base-sharded ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio-not-base-sharded -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.A1JJUtuhl0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.n8V4syAhD7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-not-base-sharded -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A1JJUtuhl0 ++ cat /tmp/tmp.n8V4syAhD7 ++ rm /tmp/tmp.A1JJUtuhl0 /tmp/tmp.n8V4syAhD7 ++ return 0 + backup_dest=operator-testing/2025-11-10T02:33:24Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-11-10T02:33:24Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.f120Rbw2tY ++ mktemp + local LAST_ERR=/tmp/tmp.09KVJo4KKg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-11-10T02:33:24Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f120Rbw2tY + cat /tmp/tmp.09KVJo4KKg + rm /tmp/tmp.f120Rbw2tY /tmp/tmp.09KVJo4KKg + return 0 2025-11-10 02:33:31 11156 filelist.pbm + '[' -z '' ']' + run_restore backup-aws-s3-sharded _restore_sharded + local backup_name=backup-aws-s3-sharded + log 'drop collection' + set +o xtrace [2025-11-10T02:33:40+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 
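compare_mongos_cmd (seen above for find -not-base-sharded, and again after each restore) replays a query through run_mongos, scrubs unstable tokens from the shell output, and diffs it against a golden file; an empty diff is a pass. A sketch of the normalization, with tmp_dir and test_dir as assumed variable names (the egrep warning in the trace notes it could simply be grep -E):

  compare_mongos_cmd() {
      local command=$1 uri=$2 postfix=$3
      run_mongos "use myApp\n db.test.${command}()" "${uri}" \
          | grep -Ev 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
          | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
          >"${tmp_dir}/${command}${postfix}"
      diff "${test_dir}/compare/${command}${postfix}.json" "${tmp_dir}/${command}${postfix}"
  }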
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ndw4TGDiLT +++ mktemp ++ local LAST_ERR=/tmp/tmp.HoaxcFH3EX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ndw4TGDiLT ++ cat /tmp/tmp.HoaxcFH3EX ++ rm /tmp/tmp.ndw4TGDiLT /tmp/tmp.HoaxcFH3EX ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.nDwslHjsnT ++ mktemp + local LAST_ERR=/tmp/tmp.KEbcyzniox + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nDwslHjsnT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3d76b342-c8b9-444e-9764-18a59f968d9c") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.KEbcyzniox + rm /tmp/tmp.nDwslHjsnT /tmp/tmp.KEbcyzniox + return 0 + log 'check backup and restore -- backup-aws-s3-sharded' + set +o xtrace [2025-11-10T02:33:43+0000] check backup and restore -- backup-aws-s3-sharded + /usr/sbin/sed -e 's/name:/name: restore-backup-aws-s3-sharded/' + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/backupName:/backupName: backup-aws-s3-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.chp9a7bkBR ++ mktemp + local LAST_ERR=/tmp/tmp.jHZfLbCYda + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.chp9a7bkBR perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3-sharded created + cat /tmp/tmp.jHZfLbCYda + rm /tmp/tmp.chp9a7bkBR /tmp/tmp.jHZfLbCYda + return 0 + run_recovery_check backup-aws-s3-sharded _restore_sharded + local backup_name=backup-aws-s3-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-aws-s3-sharded some-name requested 0 3000 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3-sharded object to be created.OK Waiting psmdb-restore/restore-backup-aws-s3-sharded to reach state "requested" .....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local 
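run_restore mirrors run_backup: drop the test collection so the restore has something to prove, then template conf/restore.yml with the restore and backup names and apply it. run_recovery_check afterwards drives wait_restore twice, first to the intermediate "requested" state (where the statefulset comparison below is taken) and then to "ready". A sketch of the templating step:

  run_restore() {
      local backup_name=$1
      yq "${test_dir}/conf/restore.yml" \
          | sed -e "s/name:/name: restore-${backup_name}/" \
          | sed -e "s/backupName:/backupName: ${backup_name}/" \
          | kubectl apply -f -
  }

Note the two seds cannot collide: the first matches the lowercase "name:" of the metadata field, while "backupName:" only contains the capitalized "Name:".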
expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-31890", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.GRbkVcI1vm ++ mktemp + local LAST_ERR=/tmp/tmp.mCKaycKGpB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GRbkVcI1vm + cat /tmp/tmp.mCKaycKGpB + rm /tmp/tmp.GRbkVcI1vm /tmp/tmp.mCKaycKGpB + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-11-10T02:38:01+0000] compare_kubectl: statefulset/some-name-rs0 OK + wait_restore backup-aws-s3-sharded some-name ready 0 3000 + local backup_name=backup-aws-s3-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3-sharded object to be created.OK Waiting psmdb-restore/restore-backup-aws-s3-sharded to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jjO2zxmqIg ++ mktemp + local LAST_ERR=/tmp/tmp.8NHHAABzyF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jjO2zxmqIg apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-31890"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 
0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod8.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-11-10T02:26:33Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-31890 resourceVersion: "1762742434470767010" uid: e8fe1f54-2a3b-4975-8295-2eead72beb9d spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.22.0 image: perconalab/percona-server-mongodb-operator:main-mongod8.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: 
wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f backupImage: perconalab/percona-server-mongodb-operator:main-backup backupVersion: 2.12.0 conditions: - lastTransitionTime: "2025-11-10T02:26:33Z" status: "True" type: sharding - lastTransitionTime: "2025-11-10T02:26:36Z" status: "True" type: initializing - lastTransitionTime: "2025-11-10T02:28:13Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-11-10T02:34:07Z" message: 'reconcile replset rs0: reconcile StatefulSet for rs0: update StatefulSet some-name-rs0: Operation cannot be fulfilled on statefulsets.apps "some-name-rs0": the object has been modified; please apply your changes to the latest version and try again' reason: ErrorReconcile status: "True" type: error host: 34.46.144.11 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod8.0 mongoVersion: 8.0.12-4 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.8NHHAABzyF + rm /tmp/tmp.jjO2zxmqIg /tmp/tmp.8NHHAABzyF + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.48QzXngPQ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kTsUngjkx3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.48QzXngPQ4 ++ cat /tmp/tmp.kTsUngjkx3 ++ rm /tmp/tmp.48QzXngPQ4 /tmp/tmp.kTsUngjkx3 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vYXEXKyDB9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xHXVf72BcL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
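The percona.com/resync-pbm: "true" annotation visible in the CR dump above means the operator still has to resync PBM after the restore, so the cluster reports "initializing" and wait_cluster_consistency polls .status.state every 10 seconds (up to 60 attempts) until it flips back to "ready". A sketch of the loop that follows:

  wait_cluster_consistency() {
      local cluster_name=$1
      local wait_time=${2:-60}
      local retry=0
      sleep 7                                    # head start for the operator, as traced
      echo -n 'waiting for cluster readyness'    # spelling as in the helper's output
      until [[ $(kubectl get psmdb "${cluster_name}" -o 'jsonpath={.status.state}') == "ready" ]]; do
          let retry+=1
          [ ${retry} -ge ${wait_time} ] && { echo "cluster never became ready"; return 1; }
          echo -n .
          sleep 10
      done
      echo .OK
  }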
'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vYXEXKyDB9 ++ cat /tmp/tmp.xHXVf72BcL ++ rm /tmp/tmp.vYXEXKyDB9 /tmp/tmp.xHXVf72BcL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GYdR49QsFW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ky4OMzQ2ak ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GYdR49QsFW ++ cat /tmp/tmp.Ky4OMzQ2ak ++ rm /tmp/tmp.GYdR49QsFW /tmp/tmp.Ky4OMzQ2ak ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TNyEgjUmOB +++ mktemp ++ local LAST_ERR=/tmp/tmp.UhFUgxfHNp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TNyEgjUmOB ++ cat /tmp/tmp.UhFUgxfHNp ++ rm /tmp/tmp.TNyEgjUmOB /tmp/tmp.UhFUgxfHNp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ymXXlbJtpC +++ mktemp ++ local LAST_ERR=/tmp/tmp.NuCjqAsWBp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ymXXlbJtpC ++ cat /tmp/tmp.NuCjqAsWBp ++ rm /tmp/tmp.ymXXlbJtpC /tmp/tmp.NuCjqAsWBp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ssztYLYiYd +++ mktemp ++ local LAST_ERR=/tmp/tmp.svA4Azjeff ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ssztYLYiYd ++ cat /tmp/tmp.svA4Azjeff ++ rm /tmp/tmp.ssztYLYiYd /tmp/tmp.svA4Azjeff ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZWWcOlTuRg +++ mktemp ++ local LAST_ERR=/tmp/tmp.b0cgSLsZl5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZWWcOlTuRg ++ cat /tmp/tmp.b0cgSLsZl5 ++ rm /tmp/tmp.ZWWcOlTuRg /tmp/tmp.b0cgSLsZl5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3S65EuB8lu +++ mktemp ++ local LAST_ERR=/tmp/tmp.4J1Jy00oeJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3S65EuB8lu ++ cat /tmp/tmp.4J1Jy00oeJ ++ rm /tmp/tmp.3S65EuB8lu /tmp/tmp.4J1Jy00oeJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lEXY9LrslB +++ mktemp ++ local LAST_ERR=/tmp/tmp.DiHAxZVmFg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lEXY9LrslB ++ cat /tmp/tmp.DiHAxZVmFg ++ rm /tmp/tmp.lEXY9LrslB /tmp/tmp.DiHAxZVmFg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hMwdF5ftpV +++ mktemp ++ local LAST_ERR=/tmp/tmp.rDiCBMscTZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hMwdF5ftpV ++ cat /tmp/tmp.rDiCBMscTZ ++ rm /tmp/tmp.hMwdF5ftpV /tmp/tmp.rDiCBMscTZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q2Q7yUGJKf +++ mktemp ++ local LAST_ERR=/tmp/tmp.G9XpVVB935 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q2Q7yUGJKf ++ cat /tmp/tmp.G9XpVVB935 ++ rm /tmp/tmp.q2Q7yUGJKf /tmp/tmp.G9XpVVB935 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ppFeT41Ied +++ mktemp ++ local LAST_ERR=/tmp/tmp.DC9eACL79k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ppFeT41Ied ++ cat /tmp/tmp.DC9eACL79k ++ rm /tmp/tmp.ppFeT41Ied /tmp/tmp.DC9eACL79k ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish..OK + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rMvJhKqudL +++ mktemp ++ local LAST_ERR=/tmp/tmp.FDsfnE2ioL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rMvJhKqudL ++ cat /tmp/tmp.FDsfnE2ioL ++ rm /tmp/tmp.rMvJhKqudL /tmp/tmp.FDsfnE2ioL ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.eaWic3wjSI ++ mktemp + local LAST_ERR=/tmp/tmp.ShcKxMYkhk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eaWic3wjSI + cat /tmp/tmp.ShcKxMYkhk + rm /tmp/tmp.eaWic3wjSI /tmp/tmp.ShcKxMYkhk + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.Ncy5tVg2lu/find-sharded + check_exported_mongos_service_endpoint 34.46.144.11 + local 
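check_exported_mongos_service_endpoint closes the loop on the exposed mongos service: the host recorded in .status.host (the LoadBalancer address, 34.46.144.11 in this run) must still equal the address captured before the restore, proving the restore did not recreate the service. A sketch, with the cluster name hard-coded as in this run:

  check_exported_mongos_service_endpoint() {
      local host=$1
      local status_host
      status_host=$(kubectl get psmdb some-name -o 'jsonpath={.status.host}')
      if [ "${host}" != "${status_host}" ]; then
          echo "mongos service endpoint changed: ${host} != ${status_host}"
          return 1
      fi
  }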
host=34.46.144.11 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.04b0yR69ma +++ mktemp ++ local LAST_ERR=/tmp/tmp.uyj12XfMze ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.04b0yR69ma ++ cat /tmp/tmp.uyj12XfMze ++ rm /tmp/tmp.04b0yR69ma /tmp/tmp.uyj12XfMze ++ return 0 + '[' 34.46.144.11 '!=' 34.46.144.11 ']' + run_restore backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + log 'drop collection' + set +o xtrace [2025-11-10T02:42:50+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9l2IygXU0F +++ mktemp ++ local LAST_ERR=/tmp/tmp.TJwSJA8NJk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9l2IygXU0F ++ cat /tmp/tmp.TJwSJA8NJk ++ rm /tmp/tmp.9l2IygXU0F /tmp/tmp.TJwSJA8NJk ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.CjRrnbMrhp ++ mktemp + local LAST_ERR=/tmp/tmp.9m2Y8jQzv6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CjRrnbMrhp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("dfb501e6-8cf4-4c30-b8e7-795c0919aeb5") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.9m2Y8jQzv6 + rm /tmp/tmp.CjRrnbMrhp /tmp/tmp.9m2Y8jQzv6 + return 0 + log 'check backup and restore -- backup-gcp-cs-sharded' + set +o xtrace [2025-11-10T02:42:52+0000] check backup and restore -- backup-gcp-cs-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-gcp-cs-sharded/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-gcp-cs-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ak5izxloF3 ++ mktemp + local LAST_ERR=/tmp/tmp.fGvHEdHZto + local exit_status=0 + 
local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ak5izxloF3 perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs-sharded created + cat /tmp/tmp.fGvHEdHZto + rm /tmp/tmp.ak5izxloF3 /tmp/tmp.fGvHEdHZto + return 0 + run_recovery_check backup-gcp-cs-sharded _restore_sharded + local backup_name=backup-gcp-cs-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-gcp-cs-sharded some-name requested 0 3000 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs-sharded object to be created.OK Waiting psmdb-restore/restore-backup-gcp-cs-sharded to reach state "requested" .....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-31890", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.86b2KSECtV ++ mktemp + local LAST_ERR=/tmp/tmp.zmfskAsQcB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.86b2KSECtV + cat /tmp/tmp.zmfskAsQcB + rm /tmp/tmp.86b2KSECtV /tmp/tmp.zmfskAsQcB + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-11-10T02:47:21+0000] compare_kubectl: statefulset/some-name-rs0 OK + wait_restore backup-gcp-cs-sharded some-name ready 0 3000 + local backup_name=backup-gcp-cs-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs-sharded object to be created.OK Waiting psmdb-restore/restore-backup-gcp-cs-sharded to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ic2mzjPgol ++ mktemp + local LAST_ERR=/tmp/tmp.idJk5XM6fP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ic2mzjPgol apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-31890"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod8.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-11-10T02:26:33Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-31890 resourceVersion: "1762742990391071010" uid: e8fe1f54-2a3b-4975-8295-2eead72beb9d spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret 
insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.22.0 image: perconalab/percona-server-mongodb-operator:main-mongod8.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f backupImage: perconalab/percona-server-mongodb-operator:main-backup backupVersion: 2.12.0 conditions: - lastTransitionTime: "2025-11-10T02:26:33Z" status: "True" type: sharding - lastTransitionTime: "2025-11-10T02:26:36Z" status: "True" type: initializing - lastTransitionTime: "2025-11-10T02:28:13Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-11-10T02:34:07Z" message: 'reconcile replset cfg: reconcile StatefulSet for cfg: update StatefulSet some-name-cfg: Operation cannot be fulfilled on statefulsets.apps "some-name-cfg": the object has been modified; please apply your changes to the latest version and try again' reason: ErrorReconcile status: "True" type: error host: 34.46.144.11 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod8.0 mongoVersion: 8.0.12-4 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true 
initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.idJk5XM6fP + rm /tmp/tmp.ic2mzjPgol /tmp/tmp.idJk5XM6fP + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Nswk3HoHFQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CUPwWjfyoW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Nswk3HoHFQ ++ cat /tmp/tmp.CUPwWjfyoW ++ rm /tmp/tmp.Nswk3HoHFQ /tmp/tmp.CUPwWjfyoW ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fRhbmeQH2y +++ mktemp ++ local LAST_ERR=/tmp/tmp.l1SahUCS4d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fRhbmeQH2y ++ cat /tmp/tmp.l1SahUCS4d ++ rm /tmp/tmp.fRhbmeQH2y /tmp/tmp.l1SahUCS4d ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pA4RA1WJbe +++ mktemp ++ local LAST_ERR=/tmp/tmp.7LeJDGQPqm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pA4RA1WJbe ++ cat /tmp/tmp.7LeJDGQPqm ++ rm /tmp/tmp.pA4RA1WJbe /tmp/tmp.7LeJDGQPqm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dZZzEUif1o +++ mktemp ++ local LAST_ERR=/tmp/tmp.OfEPWqLw3s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dZZzEUif1o ++ cat /tmp/tmp.OfEPWqLw3s ++ rm /tmp/tmp.dZZzEUif1o /tmp/tmp.OfEPWqLw3s ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xvqxt39Nah +++ mktemp ++ local LAST_ERR=/tmp/tmp.qEr9Ezhh7i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xvqxt39Nah ++ cat /tmp/tmp.qEr9Ezhh7i ++ rm /tmp/tmp.Xvqxt39Nah /tmp/tmp.qEr9Ezhh7i ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . 
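# The wait_cluster_consistency trace above boils down to polling the PSMDB
# resource's .status.state until it reads "ready". A minimal standalone
# sketch of the same pattern; the cluster name and the 60-try/10s budget are
# taken from this log, but this is not the suite's exact helper:
retry=0
until [[ "$(kubectl get psmdb some-name -o 'jsonpath={.status.state}')" == "ready" ]]; do
  (( ++retry >= 60 )) && { echo "cluster never became ready" >&2; exit 1; }
  sleep 10
done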
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XNtwBkhQ47 +++ mktemp ++ local LAST_ERR=/tmp/tmp.K1OCbXjNTe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XNtwBkhQ47 ++ cat /tmp/tmp.K1OCbXjNTe ++ rm /tmp/tmp.XNtwBkhQ47 /tmp/tmp.K1OCbXjNTe ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wd8CkLI8dj +++ mktemp ++ local LAST_ERR=/tmp/tmp.HNvhlfAA49 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wd8CkLI8dj ++ cat /tmp/tmp.HNvhlfAA49 ++ rm /tmp/tmp.Wd8CkLI8dj /tmp/tmp.HNvhlfAA49 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Kib2x74QhD +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nrlp63WweN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Kib2x74QhD ++ cat /tmp/tmp.Nrlp63WweN ++ rm /tmp/tmp.Kib2x74QhD /tmp/tmp.Nrlp63WweN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IEJCR26tEV +++ mktemp ++ local LAST_ERR=/tmp/tmp.8skgkxK9g1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IEJCR26tEV ++ cat /tmp/tmp.8skgkxK9g1 ++ rm /tmp/tmp.IEJCR26tEV /tmp/tmp.8skgkxK9g1 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . 
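# On newer clients (kubectl >= 1.23) the same readiness wait can be written
# declaratively instead of hand-rolling a sleep loop; a sketch, not what this
# suite actually runs:
kubectl wait psmdb/some-name --for=jsonpath='{.status.state}'=ready --timeout=600s
# A transient "error" value, like the one polled a few iterations back, does
# not abort this form; kubectl simply keeps waiting until the value matches.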
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y1rz0lYyxK +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ge169VOb8j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y1rz0lYyxK ++ cat /tmp/tmp.Ge169VOb8j ++ rm /tmp/tmp.y1rz0lYyxK /tmp/tmp.Ge169VOb8j ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish..OK + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pUfbPLYJ8R +++ mktemp ++ local LAST_ERR=/tmp/tmp.wBWOiAQWkm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pUfbPLYJ8R ++ cat /tmp/tmp.wBWOiAQWkm ++ rm /tmp/tmp.pUfbPLYJ8R /tmp/tmp.wBWOiAQWkm ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.plovBiZ4yr ++ mktemp + local LAST_ERR=/tmp/tmp.wcmnhQsc3a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.plovBiZ4yr + cat /tmp/tmp.wcmnhQsc3a + rm /tmp/tmp.plovBiZ4yr /tmp/tmp.wcmnhQsc3a + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.Ncy5tVg2lu/find-sharded + check_exported_mongos_service_endpoint 34.46.144.11 + local 
host=34.46.144.11 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.McIis7RnTw +++ mktemp ++ local LAST_ERR=/tmp/tmp.j02dDgveGS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.McIis7RnTw ++ cat /tmp/tmp.j02dDgveGS ++ rm /tmp/tmp.McIis7RnTw /tmp/tmp.j02dDgveGS ++ return 0 + '[' 34.46.144.11 '!=' 34.46.144.11 ']' + run_restore backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + log 'drop collection' + set +o xtrace [2025-11-10T02:51:43+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4jTkTUoigM +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cxum7VfMCh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4jTkTUoigM ++ cat /tmp/tmp.Cxum7VfMCh ++ rm /tmp/tmp.4jTkTUoigM /tmp/tmp.Cxum7VfMCh ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Kh3KF713uJ ++ mktemp + local LAST_ERR=/tmp/tmp.ywLXQ7HwBF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Kh3KF713uJ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("30ad56fa-1586-4135-9ab0-a315660a8392") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.ywLXQ7HwBF + rm /tmp/tmp.Kh3KF713uJ /tmp/tmp.ywLXQ7HwBF + return 0 + log 'check backup and restore -- backup-azure-blob-sharded' + set +o xtrace [2025-11-10T02:51:46+0000] check backup and restore -- backup-azure-blob-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-azure-blob-sharded/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-azure-blob-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.SczVYbORSZ ++ mktemp + local LAST_ERR=/tmp/tmp.BZhvEib0I1 + 
local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SczVYbORSZ perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob-sharded created + cat /tmp/tmp.BZhvEib0I1 + rm /tmp/tmp.SczVYbORSZ /tmp/tmp.BZhvEib0I1 + return 0 + run_recovery_check backup-azure-blob-sharded _restore_sharded + local backup_name=backup-azure-blob-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-azure-blob-sharded some-name requested 0 3000 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob-sharded object to be created.OK Waiting psmdb-restore/restore-backup-azure-blob-sharded to reach state "requested" ....OK after 3 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-31890", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.I3gnAUM56M ++ mktemp + local LAST_ERR=/tmp/tmp.11tfr7YCNK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I3gnAUM56M + cat /tmp/tmp.11tfr7YCNK + rm /tmp/tmp.I3gnAUM56M /tmp/tmp.11tfr7YCNK + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-11-10T02:55:52+0000] compare_kubectl: statefulset/some-name-rs0 OK + wait_restore backup-azure-blob-sharded some-name ready 0 3000 + local backup_name=backup-azure-blob-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob-sharded object to be created.OK Waiting psmdb-restore/restore-backup-azure-blob-sharded to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.FBsJkhGP31 ++ mktemp + local LAST_ERR=/tmp/tmp.SjkYKV9E4V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FBsJkhGP31 apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-31890"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod8.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-11-10T02:26:33Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-31890 resourceVersion: "1762743506988735010" uid: e8fe1f54-2a3b-4975-8295-2eead72beb9d spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret 
insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.22.0 image: perconalab/percona-server-mongodb-operator:main-mongod8.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f backupImage: perconalab/percona-server-mongodb-operator:main-backup backupVersion: 2.12.0 conditions: - lastTransitionTime: "2025-11-10T02:26:33Z" status: "True" type: sharding - lastTransitionTime: "2025-11-10T02:26:36Z" status: "True" type: initializing - lastTransitionTime: "2025-11-10T02:28:13Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-11-10T02:34:07Z" message: 'dial: ping mongo: server selection error: context deadline exceeded, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-31890.svc.cluster.local:27017, Type: RSSecondary, Tag sets: serviceName=some-name,zone=us-central1-a,podName=some-name-cfg-0,region=us-central1,nodeName=gke-jen-psmdb-2106-f35e1-default-pool-78c462d8-6t1r, Average RTT: 1269384 }, { Addr: some-name-cfg-1.some-name-cfg.demand-backup-incremental-sharded-31890.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp: lookup 
some-name-cfg-1.some-name-cfg.demand-backup-incremental-sharded-31890.svc.cluster.local on 34.118.224.10:53: no such host }, { Addr: some-name-cfg-2.some-name-cfg.demand-backup-incremental-sharded-31890.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp: lookup some-name-cfg-2.some-name-cfg.demand-backup-incremental-sharded-31890.svc.cluster.local on 34.118.224.10:53: no such host }, ] }' reason: ErrorReconcile status: "True" type: error host: 34.46.144.11 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod8.0 mongoVersion: 8.0.12-4 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.SjkYKV9E4V + rm /tmp/tmp.FBsJkhGP31 /tmp/tmp.SjkYKV9E4V + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J2L4p3DRL3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.X53XvYkyVz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J2L4p3DRL3 ++ cat /tmp/tmp.X53XvYkyVz ++ rm /tmp/tmp.J2L4p3DRL3 /tmp/tmp.X53XvYkyVz ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c60u3jMyCo +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZzYi8nkPgd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c60u3jMyCo ++ cat /tmp/tmp.ZzYi8nkPgd ++ rm /tmp/tmp.c60u3jMyCo /tmp/tmp.ZzYi8nkPgd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4NupJX6pNq +++ mktemp ++ local LAST_ERR=/tmp/tmp.src2Wtc6vl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4NupJX6pNq ++ cat /tmp/tmp.src2Wtc6vl ++ rm /tmp/tmp.4NupJX6pNq /tmp/tmp.src2Wtc6vl ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JwXCfnOXp3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QskD2YXgkD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JwXCfnOXp3 ++ cat /tmp/tmp.QskD2YXgkD ++ rm /tmp/tmp.JwXCfnOXp3 /tmp/tmp.QskD2YXgkD ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . 
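# Before settling into this wait loop the test re-read the CR and compared the
# percona.com/resync-pbm annotation against "null" (the '[ true == null ]'
# step above). In isolation that check is just the yq lookup this log already
# uses:
kubectl get psmdb some-name -o yaml \
  | yq '.metadata.annotations."percona.com/resync-pbm"'
# In this run it prints "true" (a PBM resync still appears to be pending for
# the operator); yq prints "null" once the annotation is gone.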
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gE13CvBkBS +++ mktemp ++ local LAST_ERR=/tmp/tmp.T1ncUw0njQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gE13CvBkBS ++ cat /tmp/tmp.T1ncUw0njQ ++ rm /tmp/tmp.gE13CvBkBS /tmp/tmp.T1ncUw0njQ ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zUK38VX2Py +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ft40qXsQjx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zUK38VX2Py ++ cat /tmp/tmp.Ft40qXsQjx ++ rm /tmp/tmp.zUK38VX2Py /tmp/tmp.Ft40qXsQjx ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6KdKCADrhj +++ mktemp ++ local LAST_ERR=/tmp/tmp.zwNaAvyTmU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6KdKCADrhj ++ cat /tmp/tmp.zwNaAvyTmU ++ rm /tmp/tmp.6KdKCADrhj /tmp/tmp.zwNaAvyTmU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cWvXeJKO9A +++ mktemp ++ local LAST_ERR=/tmp/tmp.FdHXU3uVXo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cWvXeJKO9A ++ cat /tmp/tmp.FdHXU3uVXo ++ rm /tmp/tmp.cWvXeJKO9A /tmp/tmp.FdHXU3uVXo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DBy47KmanB +++ mktemp ++ local LAST_ERR=/tmp/tmp.DjKVpOwccR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DBy47KmanB ++ cat /tmp/tmp.DjKVpOwccR ++ rm /tmp/tmp.DBy47KmanB /tmp/tmp.DjKVpOwccR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bWhaELRdZW +++ mktemp ++ local LAST_ERR=/tmp/tmp.zZnNsG0AUE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bWhaELRdZW ++ cat /tmp/tmp.zZnNsG0AUE ++ rm /tmp/tmp.bWhaELRdZW /tmp/tmp.zZnNsG0AUE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . 
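# When the polled state flips to "error", as it does several times in this
# loop, the cause is recorded in the CR's conditions. The status dumps in this
# log show two variants: an optimistic-locking conflict updating the
# some-name-cfg StatefulSet, and a server-selection/DNS failure while the cfg
# pods were coming back. One way to surface the message, in the same yq style
# used elsewhere here:
kubectl get psmdb some-name -o yaml \
  | yq '.status.conditions[] | select(.type == "error") | .message'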
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fi07KoYxMX +++ mktemp ++ local LAST_ERR=/tmp/tmp.ReF1UheS1j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fi07KoYxMX ++ cat /tmp/tmp.ReF1UheS1j ++ rm /tmp/tmp.Fi07KoYxMX /tmp/tmp.ReF1UheS1j ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AobbNrHEK8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.l2DXCQMLqW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AobbNrHEK8 ++ cat /tmp/tmp.l2DXCQMLqW ++ rm /tmp/tmp.AobbNrHEK8 /tmp/tmp.l2DXCQMLqW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.am1GeRYGQ6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bB6XUxp8jx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.am1GeRYGQ6 ++ cat /tmp/tmp.bB6XUxp8jx ++ rm /tmp/tmp.am1GeRYGQ6 /tmp/tmp.bB6XUxp8jx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EAIzB7DIfv +++ mktemp ++ local LAST_ERR=/tmp/tmp.NmBN6Lyaac ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EAIzB7DIfv ++ cat /tmp/tmp.NmBN6Lyaac ++ rm /tmp/tmp.EAIzB7DIfv /tmp/tmp.NmBN6Lyaac ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish..OK + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print 
$2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3KSexbtuJG +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kzr52K9zC0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3KSexbtuJG ++ cat /tmp/tmp.Kzr52K9zC0 ++ rm /tmp/tmp.3KSexbtuJG /tmp/tmp.Kzr52K9zC0 ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.I67mwbVuwe ++ mktemp + local LAST_ERR=/tmp/tmp.4fiYg2CknN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I67mwbVuwe + cat /tmp/tmp.4fiYg2CknN + rm /tmp/tmp.I67mwbVuwe /tmp/tmp.4fiYg2CknN + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.Ncy5tVg2lu/find-sharded + check_exported_mongos_service_endpoint 34.46.144.11 + local host=34.46.144.11 ++ kubectl_bin get psmdb some-name '-o=jsonpath={.status.host}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jJZZ1Sicxw +++ mktemp ++ local LAST_ERR=/tmp/tmp.jPBMyvfk5I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name '-o=jsonpath={.status.host}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jJZZ1Sicxw ++ cat /tmp/tmp.jPBMyvfk5I ++ rm /tmp/tmp.jJZZ1Sicxw /tmp/tmp.jPBMyvfk5I ++ return 0 + '[' 34.46.144.11 '!=' 34.46.144.11 ']' + run_restore backup-minio-not-base-sharded _restore_sharded + local backup_name=backup-minio-not-base-sharded + log 'drop collection' + set +o xtrace [2025-11-10T03:01:03+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HHrO9VTzN8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OC8iaOGqhH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HHrO9VTzN8 ++ cat /tmp/tmp.OC8iaOGqhH ++ rm /tmp/tmp.HHrO9VTzN8 /tmp/tmp.OC8iaOGqhH ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + 
kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.akCZheY4dS ++ mktemp + local LAST_ERR=/tmp/tmp.qcudSVK3CH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.akCZheY4dS Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("c7f025ad-9cfe-41b2-9f6c-5533e9f52f2c") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.qcudSVK3CH + rm /tmp/tmp.akCZheY4dS /tmp/tmp.qcudSVK3CH + return 0 + log 'check backup and restore -- backup-minio-not-base-sharded' + set +o xtrace [2025-11-10T03:01:06+0000] check backup and restore -- backup-minio-not-base-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-not-base-sharded/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-not-base-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.TfbN5yx539 ++ mktemp + local LAST_ERR=/tmp/tmp.HklvfrY7xU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TfbN5yx539 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-not-base-sharded created + cat /tmp/tmp.HklvfrY7xU + rm /tmp/tmp.TfbN5yx539 /tmp/tmp.HklvfrY7xU + return 0 + run_recovery_check backup-minio-not-base-sharded _restore_sharded false + local backup_name=backup-minio-not-base-sharded + local compare_suffix=_restore_sharded + local base=false + wait_restore backup-minio-not-base-sharded some-name requested 0 3000 + local backup_name=backup-minio-not-base-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base-sharded object to be created.OK Waiting psmdb-restore/restore-backup-minio-not-base-sharded to reach state "requested" .....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-31890", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.VQ6egPUJli ++ mktemp + local LAST_ERR=/tmp/tmp.PJLcNlbIJw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VQ6egPUJli + cat /tmp/tmp.PJLcNlbIJw + rm /tmp/tmp.VQ6egPUJli /tmp/tmp.PJLcNlbIJw + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-11-10T03:05:20+0000] compare_kubectl: statefulset/some-name-rs0 OK + wait_restore backup-minio-not-base-sharded some-name ready 0 3000 + local backup_name=backup-minio-not-base-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base-sharded object to be created.OK Waiting psmdb-restore/restore-backup-minio-not-base-sharded to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.jNpL3sWHsB ++ mktemp + local LAST_ERR=/tmp/tmp.JfnmAyhquO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jNpL3sWHsB apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-31890"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 
0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod8.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-11-10T02:26:33Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-31890 resourceVersion: "1762744059048511010" uid: e8fe1f54-2a3b-4975-8295-2eead72beb9d spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.22.0 image: perconalab/percona-server-mongodb-operator:main-mongod8.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: 
wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f backupImage: perconalab/percona-server-mongodb-operator:main-backup backupVersion: 2.12.0 conditions: - lastTransitionTime: "2025-11-10T02:26:33Z" status: "True" type: sharding - lastTransitionTime: "2025-11-10T02:26:36Z" status: "True" type: initializing - lastTransitionTime: "2025-11-10T02:28:13Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-11-10T02:34:07Z" message: 'reconcile replset cfg: reconcile StatefulSet for cfg: update StatefulSet some-name-cfg: Operation cannot be fulfilled on statefulsets.apps "some-name-cfg": the object has been modified; please apply your changes to the latest version and try again' reason: ErrorReconcile status: "True" type: error host: 34.46.144.11 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod8.0 mongoVersion: 8.0.12-4 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.JfnmAyhquO + rm /tmp/tmp.jNpL3sWHsB /tmp/tmp.JfnmAyhquO + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.doi9PFsH3s +++ mktemp ++ local LAST_ERR=/tmp/tmp.DLw6Ondzla ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.doi9PFsH3s ++ cat /tmp/tmp.DLw6Ondzla ++ rm /tmp/tmp.doi9PFsH3s /tmp/tmp.DLw6Ondzla ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P7DM4YNXq6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.AMilkBXEq6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P7DM4YNXq6 ++ cat /tmp/tmp.AMilkBXEq6 ++ rm /tmp/tmp.P7DM4YNXq6 /tmp/tmp.AMilkBXEq6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PDtrxtN9TE +++ mktemp ++ local LAST_ERR=/tmp/tmp.7OXuNaugw3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PDtrxtN9TE ++ cat /tmp/tmp.7OXuNaugw3 ++ rm /tmp/tmp.PDtrxtN9TE /tmp/tmp.7OXuNaugw3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BE6IP8l9ka +++ mktemp ++ local LAST_ERR=/tmp/tmp.3XupiCyFcF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BE6IP8l9ka ++ cat /tmp/tmp.3XupiCyFcF ++ rm /tmp/tmp.BE6IP8l9ka /tmp/tmp.3XupiCyFcF ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LEjQNKqSAg +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZjasnF2UwG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LEjQNKqSAg ++ cat /tmp/tmp.ZjasnF2UwG ++ rm /tmp/tmp.LEjQNKqSAg /tmp/tmp.ZjasnF2UwG ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SjDp8n0lEk +++ mktemp ++ local LAST_ERR=/tmp/tmp.tiTTpdtMUi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SjDp8n0lEk ++ cat /tmp/tmp.tiTTpdtMUi ++ rm /tmp/tmp.SjDp8n0lEk /tmp/tmp.tiTTpdtMUi ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hLkSkGCeqb +++ mktemp ++ local LAST_ERR=/tmp/tmp.tpAhEhgg01 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hLkSkGCeqb ++ cat /tmp/tmp.tpAhEhgg01 ++ rm /tmp/tmp.hLkSkGCeqb /tmp/tmp.tpAhEhgg01 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . 
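
The polling under way here is the suite's wait_cluster_consistency pattern: read the PSMDB object's .status.state with a JSONPath query, print a dot every 10 seconds, and give up after a fixed retry budget. A minimal standalone sketch of the same loop (the cluster name and 60-retry budget mirror the trace; the kubectl_bin temp-file/retry wrapper is omitted):

cluster=some-name
retry=0
until [[ "$(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}')" == "ready" ]]; do
  retry=$((retry + 1))
  # transient "error" states, as seen in the iterations above, are simply polled through
  if [[ $retry -ge 60 ]]; then
    echo "cluster $cluster never reached ready" >&2
    exit 1
  fi
  echo -n .
  sleep 10
done
echo .OK
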
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aTXaIYdDRP +++ mktemp ++ local LAST_ERR=/tmp/tmp.uJVMl3c638 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aTXaIYdDRP ++ cat /tmp/tmp.uJVMl3c638 ++ rm /tmp/tmp.aTXaIYdDRP /tmp/tmp.uJVMl3c638 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CTaqtfLkOD +++ mktemp ++ local LAST_ERR=/tmp/tmp.739wTl8Vc9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CTaqtfLkOD ++ cat /tmp/tmp.739wTl8Vc9 ++ rm /tmp/tmp.CTaqtfLkOD /tmp/tmp.739wTl8Vc9 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0DFR8JNhUK +++ mktemp ++ local LAST_ERR=/tmp/tmp.t3ofWvESBf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0DFR8JNhUK ++ cat /tmp/tmp.t3ofWvESBf ++ rm /tmp/tmp.0DFR8JNhUK /tmp/tmp.t3ofWvESBf ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V79Q54OcOH +++ mktemp ++ local LAST_ERR=/tmp/tmp.3RzIKkqIui ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V79Q54OcOH ++ cat /tmp/tmp.3RzIKkqIui ++ rm /tmp/tmp.V79Q54OcOH /tmp/tmp.3RzIKkqIui ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HQ8HlbwO09 +++ mktemp ++ local LAST_ERR=/tmp/tmp.blAjVenN9i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HQ8HlbwO09 ++ cat /tmp/tmp.blAjVenN9i ++ rm /tmp/tmp.HQ8HlbwO09 /tmp/tmp.blAjVenN9i ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gZ0pUNxZOk +++ mktemp ++ local LAST_ERR=/tmp/tmp.cuVhMDOgr0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gZ0pUNxZOk ++ cat /tmp/tmp.cuVhMDOgr0 ++ rm /tmp/tmp.gZ0pUNxZOk /tmp/tmp.cuVhMDOgr0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . 
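
wait_restore, invoked several times above, is a two-phase wait: first block until the psmdb-restore object exists at all, then poll until it reaches the requested target state ("requested" or "ready"). A hedged sketch of the idea, assuming the restore object exposes .status.state the way the PSMDB object does (the restore name and sleep intervals are illustrative):

restore=restore-backup-minio-sharded
target=ready
# phase 1: wait for the object to be created
until kubectl get psmdb-restore "$restore" >/dev/null 2>&1; do
  sleep 1
done
# phase 2: wait for it to reach the target state
until [[ "$(kubectl get psmdb-restore "$restore" -o 'jsonpath={.status.state}')" == "$target" ]]; do
  echo -n .
  sleep 10
done
echo OK
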
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VrBw95jlqD +++ mktemp ++ local LAST_ERR=/tmp/tmp.L6uLNNpHju ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VrBw95jlqD ++ cat /tmp/tmp.L6uLNNpHju ++ rm /tmp/tmp.VrBw95jlqD /tmp/tmp.L6uLNNpHju ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.OK + [[ false == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 -not-base-sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local postfix=-not-base-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TxE1bxruhb +++ mktemp ++ local LAST_ERR=/tmp/tmp.1pxKnFVnHl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TxE1bxruhb ++ cat /tmp/tmp.1pxKnFVnHl ++ rm /tmp/tmp.TxE1bxruhb /tmp/tmp.1pxKnFVnHl ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.rAQT999z9T ++ mktemp + local LAST_ERR=/tmp/tmp.RleLNK6nVV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rAQT999z9T + cat /tmp/tmp.RleLNK6nVV + rm /tmp/tmp.rAQT999z9T /tmp/tmp.RleLNK6nVV + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/find-not-base-sharded.json /tmp/tmp.Ncy5tVg2lu/find-not-base-sharded + run_restore 
backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + log 'drop collection' + set +o xtrace [2025-11-10T03:10:08+0000] drop collection + run_mongos 'use myApp\n db.test.drop()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PIuZHDutzl +++ mktemp ++ local LAST_ERR=/tmp/tmp.SpOSjU7dHs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PIuZHDutzl ++ cat /tmp/tmp.SpOSjU7dHs ++ rm /tmp/tmp.PIuZHDutzl /tmp/tmp.SpOSjU7dHs ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.L61oIw2q31 ++ mktemp + local LAST_ERR=/tmp/tmp.9xtr7XFnc2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L61oIw2q31 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("e0b1848e-0939-4515-9720-760a0cb209e5") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.9xtr7XFnc2 + rm /tmp/tmp.L61oIw2q31 /tmp/tmp.9xtr7XFnc2 + return 0 + log 'check backup and restore -- backup-minio-sharded' + set +o xtrace [2025-11-10T03:10:09+0000] check backup and restore -- backup-minio-sharded + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-sharded/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-sharded/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.z2zrlPTuTB ++ mktemp + local LAST_ERR=/tmp/tmp.Niq2M17wAT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z2zrlPTuTB perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-sharded created + cat /tmp/tmp.Niq2M17wAT + rm /tmp/tmp.z2zrlPTuTB /tmp/tmp.Niq2M17wAT + return 0 + run_recovery_check backup-minio-sharded _restore_sharded + local backup_name=backup-minio-sharded + local compare_suffix=_restore_sharded + local base=true + wait_restore backup-minio-sharded some-name requested 0 3000 + local 
backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-sharded object to be created.OK Waiting psmdb-restore/restore-backup-minio-sharded to reach state "requested" .....OK after 4 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore_sharded + local resource=statefulset/some-name-rs0 + local postfix=_restore_sharded + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml + local new_result=/tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-31890", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.2SjckFVZzU ++ mktemp + local LAST_ERR=/tmp/tmp.Y5bLlPu7wp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2SjckFVZzU + cat /tmp/tmp.Y5bLlPu7wp + rm /tmp/tmp.2SjckFVZzU /tmp/tmp.Y5bLlPu7wp + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/statefulset_some-name-rs0_restore_sharded.yml /tmp/tmp.Ncy5tVg2lu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-11-10T03:14:37+0000] compare_kubectl: statefulset/some-name-rs0 OK + wait_restore backup-minio-sharded some-name ready 0 3000 + local backup_name=backup-minio-sharded + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-sharded object to be created.OK Waiting psmdb-restore/restore-backup-minio-sharded to reach state "ready" ...OK after 2 minutes + [[ 0 -eq 1 ]] + kubectl_bin get psmdb some-name -o yaml ++ mktemp + local LAST_OUT=/tmp/tmp.r9PkudvaPP ++ mktemp + local LAST_ERR=/tmp/tmp.06TDhbbgko + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb some-name -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r9PkudvaPP apiVersion: psmdb.percona.com/v1 kind: PerconaServerMongoDB metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDB","metadata":{"annotations":{},"name":"some-name","namespace":"demand-backup-incremental-sharded-31890"},"spec":{"backup":{"enabled":true,"image":"perconalab/percona-server-mongodb-operator:main-backup","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"azure-blob":{"azure":{"container":"operator-testing","credentialsSecret":"azure-secret","prefix":"psmdb-demand-backup-incremental-sharded"},"type":"azure"},"gcp-cs":{"s3":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","insecureSkipTLSVerify":false,"prefix":"psmdb-demand-backup-incremental-sharded","region":"us-east-1"},"type":"s3"},"minio":{"main":true,"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service:9000/","insecureSkipTLSVerify":false,"region":"us-east-1"},"type":"s3"}},"tasks":[{"compressionType":"gzip","enabled":true,"name":"weekly","schedule":"0 0 * * 
0","storageName":"aws-s3"}]},"image":"perconalab/percona-server-mongodb-operator:main-mongod8.0","imagePullPolicy":"Always","replsets":[{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"name":"rs0","resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}}],"secrets":{"users":"some-users"},"sharding":{"configsvrReplSet":{"affinity":{"antiAffinityTopologyKey":"none"},"configuration":"operationProfiling:\n mode: slowOp\n slowOpThresholdMs: 100\nsecurity:\n enableEncryption: true\n redactClientLogData: false\nsetParameter:\n ttlMonitorSleepSecs: 60\n wiredTigerConcurrentReadTransactions: 128\n wiredTigerConcurrentWriteTransactions: 128\nstorage:\n engine: wiredTiger\n wiredTiger:\n collectionConfig:\n blockCompressor: snappy\n engineConfig:\n directoryForIndexes: false\n journalCompressor: snappy\n indexConfig:\n prefixCompression: true\n","expose":{"enabled":false,"type":"ClusterIP"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"affinity":{"antiAffinityTopologyKey":"none"},"expose":{"type":"LoadBalancer"},"resources":{"limits":{"cpu":"500m","memory":"1G"},"requests":{"cpu":"100m","memory":"0.1G"}},"size":3}},"upgradeOptions":{"apply":"Never"}}} percona.com/resync-pbm: "true" creationTimestamp: "2025-11-10T02:26:33Z" generation: 2 name: some-name namespace: demand-backup-incremental-sharded-31890 resourceVersion: "1762744615116975010" uid: e8fe1f54-2a3b-4975-8295-2eead72beb9d spec: backup: enabled: true image: perconalab/percona-server-mongodb-operator:main-backup storages: aws-s3: s3: bucket: operator-testing credentialsSecret: aws-s3-secret insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 azure-blob: azure: container: operator-testing credentialsSecret: azure-secret prefix: psmdb-demand-backup-incremental-sharded type: azure gcp-cs: s3: bucket: operator-testing credentialsSecret: gcp-cs-secret endpointUrl: https://storage.googleapis.com insecureSkipTLSVerify: false prefix: psmdb-demand-backup-incremental-sharded region: us-east-1 type: s3 minio: main: true s3: bucket: operator-testing credentialsSecret: minio-secret endpointUrl: http://minio-service:9000/ insecureSkipTLSVerify: false region: us-east-1 type: s3 tasks: - compressionType: gzip enabled: true name: weekly schedule: 0 0 * * 0 storageName: aws-s3 crVersion: 1.22.0 image: perconalab/percona-server-mongodb-operator:main-mongod8.0 imagePullPolicy: Always replsets: - affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: 
wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP name: rs0 resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi secrets: users: some-users sharding: configsvrReplSet: affinity: antiAffinityTopologyKey: none configuration: | operationProfiling: mode: slowOp slowOpThresholdMs: 100 security: enableEncryption: true redactClientLogData: false setParameter: ttlMonitorSleepSecs: 60 wiredTigerConcurrentReadTransactions: 128 wiredTigerConcurrentWriteTransactions: 128 storage: engine: wiredTiger wiredTiger: collectionConfig: blockCompressor: snappy engineConfig: directoryForIndexes: false journalCompressor: snappy indexConfig: prefixCompression: true expose: enabled: false type: ClusterIP resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 volumeSpec: persistentVolumeClaim: resources: requests: storage: 3Gi enabled: true mongos: affinity: antiAffinityTopologyKey: none expose: type: LoadBalancer resources: limits: cpu: 500m memory: 1G requests: cpu: 100m memory: 0.1G size: 3 upgradeOptions: apply: Never status: backupConfigHash: 657556472134be8a3e4b574eeba7d6db2138970454efb713e0add6290c72d12f backupImage: perconalab/percona-server-mongodb-operator:main-backup backupVersion: 2.12.0 conditions: - lastTransitionTime: "2025-11-10T02:26:33Z" status: "True" type: sharding - lastTransitionTime: "2025-11-10T02:26:36Z" status: "True" type: initializing - lastTransitionTime: "2025-11-10T02:28:13Z" message: 'rs0: ready' reason: RSReady status: "True" type: ready - lastTransitionTime: "2025-11-10T02:34:07Z" message: 'handle ReplicaSetNoPrimary: get standalone mongo client: ping mongo: server selection error: server selection timeout, current topology: { Type: Single, Servers: [{ Addr: some-name-cfg-0.some-name-cfg.demand-backup-incremental-sharded-31890.svc.cluster.local:27017, Type: Unknown, Last error: dial tcp 10.229.225.51:27017: connect: connection refused }, ] }' reason: ErrorReconcile status: "True" type: error host: 34.46.144.11 mongoImage: perconalab/percona-server-mongodb-operator:main-mongod8.0 mongoVersion: 8.0.12-4 mongos: ready: 0 size: 0 status: initializing observedGeneration: 2 ready: 6 replsets: cfg: initialized: true ready: 3 size: 3 status: ready rs0: added_as_shard: true initialized: true ready: 3 size: 3 status: ready size: 6 state: initializing + cat /tmp/tmp.06TDhbbgko + rm /tmp/tmp.r9PkudvaPP /tmp/tmp.06TDhbbgko + return 0 ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VwGeAGRUtQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.1HnoWHI7O7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VwGeAGRUtQ ++ cat /tmp/tmp.1HnoWHI7O7 ++ rm /tmp/tmp.VwGeAGRUtQ /tmp/tmp.1HnoWHI7O7 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xMkJVlphDM +++ mktemp ++ local LAST_ERR=/tmp/tmp.NVTiUqO6V2 ++ local exit_status=0 ++ 
local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xMkJVlphDM ++ cat /tmp/tmp.NVTiUqO6V2 ++ rm /tmp/tmp.xMkJVlphDM /tmp/tmp.NVTiUqO6V2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o0dgX66Tf4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kJ6zlaAnUR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o0dgX66Tf4 ++ cat /tmp/tmp.kJ6zlaAnUR ++ rm /tmp/tmp.o0dgX66Tf4 /tmp/tmp.kJ6zlaAnUR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.woxzXPZoIm +++ mktemp ++ local LAST_ERR=/tmp/tmp.EhPSMfMoXb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.woxzXPZoIm ++ cat /tmp/tmp.EhPSMfMoXb ++ rm /tmp/tmp.woxzXPZoIm /tmp/tmp.EhPSMfMoXb ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cBbHVkkXbC +++ mktemp ++ local LAST_ERR=/tmp/tmp.elxCGxeXot ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cBbHVkkXbC ++ cat /tmp/tmp.elxCGxeXot ++ rm /tmp/tmp.cBbHVkkXbC /tmp/tmp.elxCGxeXot ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fNSA7yfMsV +++ mktemp ++ local LAST_ERR=/tmp/tmp.NfZaeQPG5S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fNSA7yfMsV ++ cat /tmp/tmp.NfZaeQPG5S ++ rm /tmp/tmp.fNSA7yfMsV /tmp/tmp.NfZaeQPG5S ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xgj8U7aH6a +++ mktemp ++ local LAST_ERR=/tmp/tmp.RJ5iX9pIvN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xgj8U7aH6a ++ cat /tmp/tmp.RJ5iX9pIvN ++ rm /tmp/tmp.Xgj8U7aH6a /tmp/tmp.RJ5iX9pIvN ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . 
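
The long yq expression run before each compare_kubectl diff above strips everything volatile from the live object (timestamps, UIDs, namespaces, images, cluster IPs, operator hashes, status) and rewrites the test namespace to the NAME_SPACE placeholder, so the result can be diffed byte-for-byte against a checked-in expectation. A much-reduced sketch of the same normalize-then-diff idea; the del() list here is a small illustrative subset of the real filter, and expected.yml stands in for the file under e2e-tests/.../compare/:

kubectl get -o yaml statefulset/some-name-rs0 \
  | yq eval '
      del(.metadata.managedFields)
      | del(.. | select(has("creationTimestamp")).creationTimestamp)
      | del(.. | select(has("uid")).uid)
      | del(.metadata.resourceVersion)
      | del(.status)
      | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-sharded-31890", "NAME_SPACE")
    ' - >/tmp/actual.yml
diff -u expected.yml /tmp/actual.yml
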
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X2KodDHPJZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.eFdLWY5NkI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X2KodDHPJZ ++ cat /tmp/tmp.eFdLWY5NkI ++ rm /tmp/tmp.X2KodDHPJZ /tmp/tmp.eFdLWY5NkI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9HF9rdQ8WN +++ mktemp ++ local LAST_ERR=/tmp/tmp.9bIvoDqTVM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9HF9rdQ8WN ++ cat /tmp/tmp.9bIvoDqTVM ++ rm /tmp/tmp.9HF9rdQ8WN /tmp/tmp.9bIvoDqTVM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pMLa8N937w +++ mktemp ++ local LAST_ERR=/tmp/tmp.cDuGmdERYJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pMLa8N937w ++ cat /tmp/tmp.cDuGmdERYJ ++ rm /tmp/tmp.pMLa8N937w /tmp/tmp.cDuGmdERYJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y5BMjAbLlt +++ mktemp ++ local LAST_ERR=/tmp/tmp.pjVFlTQOmj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y5BMjAbLlt ++ cat /tmp/tmp.pjVFlTQOmj ++ rm /tmp/tmp.Y5BMjAbLlt /tmp/tmp.pjVFlTQOmj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gdxKDhVRNt +++ mktemp ++ local LAST_ERR=/tmp/tmp.lfbrC7QJLC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gdxKDhVRNt ++ cat /tmp/tmp.lfbrC7QJLC ++ rm /tmp/tmp.gdxKDhVRNt /tmp/tmp.lfbrC7QJLC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c1JNDKUWND +++ mktemp ++ local LAST_ERR=/tmp/tmp.msmvQVjFss ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c1JNDKUWND ++ cat /tmp/tmp.msmvQVjFss ++ rm /tmp/tmp.c1JNDKUWND /tmp/tmp.msmvQVjFss ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . 
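
After each restore reaches "ready", the trace reads the percona.com/resync-pbm annotation off the PSMDB object ("true" here) before waiting for readiness and for PBM operations to finish; this excerpt only shows the '[ true == null ]' test, so the exact branching is not visible, but the annotation read itself is just:

resync=$(kubectl get psmdb some-name -o yaml \
  | yq '.metadata.annotations."percona.com/resync-pbm"')
if [[ "$resync" != "null" ]]; then
  echo "operator requested a PBM resync"  # "true" in the trace above
fi
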
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gx3hCJqwYc +++ mktemp ++ local LAST_ERR=/tmp/tmp.tFKfHb1RuI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gx3hCJqwYc ++ cat /tmp/tmp.tFKfHb1RuI ++ rm /tmp/tmp.Gx3hCJqwYc /tmp/tmp.tFKfHb1RuI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EyPcLTvpkb +++ mktemp ++ local LAST_ERR=/tmp/tmp.8pIMPvc6Cq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EyPcLTvpkb ++ cat /tmp/tmp.8pIMPvc6Cq ++ rm /tmp/tmp.EyPcLTvpkb /tmp/tmp.8pIMPvc6Cq ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.OK + [[ true == true ]] + compare_mongos_cmd find myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 -sharded + local command=find + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local postfix=-sharded + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M7wrfY8Sw7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yoTpzONBpq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M7wrfY8Sw7 ++ cat /tmp/tmp.yoTpzONBpq ++ rm /tmp/tmp.M7wrfY8Sw7 /tmp/tmp.yoTpzONBpq ++ return 0 + local client_container=psmdb-client-8f86f7874-hxc5c + kubectl_bin exec psmdb-client-8f86f7874-hxc5c -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.SRaGWH8O3K ++ mktemp + local LAST_ERR=/tmp/tmp.SN83qBND6c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-8f86f7874-hxc5c -- 
bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.demand-backup-incremental-sharded-31890.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SRaGWH8O3K + cat /tmp/tmp.SN83qBND6c + rm /tmp/tmp.SRaGWH8O3K /tmp/tmp.SN83qBND6c + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/e2e-tests/demand-backup-incremental-sharded/compare/find-sharded.json /tmp/tmp.Ncy5tVg2lu/find-sharded + destroy demand-backup-incremental-sharded-31890 + local namespace=demand-backup-incremental-sharded-31890 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.M54ILenKOo +++ mktemp ++ local LAST_ERR=/tmp/tmp.NXey5VhjJT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M54ILenKOo ++ cat /tmp/tmp.NXey5VhjJT ++ rm /tmp/tmp.M54ILenKOo /tmp/tmp.NXey5VhjJT ++ return 0 + '[' 5 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.lQas0R7kUD ++ mktemp + local LAST_ERR=/tmp/tmp.tX71wQHGdu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lQas0R7kUD NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-aws-s3-sharded some-name aws-s3 s3://operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:30:59Z incremental-base 1.71MB ready 48m 48m backup-azure-blob-sharded some-name azure-blob azure://operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:32:04Z incremental-base 2.07MB ready 47m 48m backup-gcp-cs-sharded some-name gcp-cs s3://operator-testing/psmdb-demand-backup-incremental-sharded/2025-11-10T02:31:37Z incremental-base 1.52MB ready 47m 48m backup-minio-not-base-sharded some-name minio s3://operator-testing/2025-11-10T02:33:24Z incremental 2.33MB ready 46m 46m backup-minio-sharded some-name minio s3://operator-testing/2025-11-10T02:32:42Z incremental-base 2.45MB ready 46m 46m + cat /tmp/tmp.tX71wQHGdu + rm /tmp/tmp.lQas0R7kUD /tmp/tmp.tX71wQHGdu + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.r1ANrx1Y9w ++ mktemp + local LAST_ERR=/tmp/tmp.LL9uVvcUPP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r1ANrx1Y9w perconaservermongodbbackup.psmdb.percona.com "backup-aws-s3-sharded" deleted from demand-backup-incremental-sharded-31890 namespace perconaservermongodbbackup.psmdb.percona.com "backup-azure-blob-sharded" deleted from demand-backup-incremental-sharded-31890 namespace perconaservermongodbbackup.psmdb.percona.com 
"backup-gcp-cs-sharded" deleted from demand-backup-incremental-sharded-31890 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-not-base-sharded" deleted from demand-backup-incremental-sharded-31890 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-sharded" deleted from demand-backup-incremental-sharded-31890 namespace + cat /tmp/tmp.LL9uVvcUPP + rm /tmp/tmp.r1ANrx1Y9w /tmp/tmp.LL9uVvcUPP + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.GBOcFwsKFX ++ mktemp + local LAST_ERR=/tmp/tmp.ZZSLFZohCa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GBOcFwsKFX customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.ZZSLFZohCa + rm /tmp/tmp.GBOcFwsKFX /tmp/tmp.ZZSLFZohCa + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/crd.yaml grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UXcmFwIy6x ++ mktemp + local LAST_ERR=/tmp/tmp.imcHowrD7T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UXcmFwIy6x + cat /tmp/tmp.imcHowrD7T + rm /tmp/tmp.UXcmFwIy6x /tmp/tmp.imcHowrD7T + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait 
--for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.uOl7jmGZe5 ++ mktemp + local LAST_ERR=/tmp/tmp.SFYoJQSbEC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uOl7jmGZe5 + cat /tmp/tmp.SFYoJQSbEC + rm /tmp/tmp.uOl7jmGZe5 /tmp/tmp.SFYoJQSbEC + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.mMOxVhVusl ++ mktemp + local LAST_ERR=/tmp/tmp.F0Ew8HKDBZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mMOxVhVusl + cat /tmp/tmp.F0Ew8HKDBZ + rm /tmp/tmp.mMOxVhVusl /tmp/tmp.F0Ew8HKDBZ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.iLTfIKuomS ++ mktemp + local LAST_ERR=/tmp/tmp.plpaDNClpW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2106/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iLTfIKuomS clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.plpaDNClpW + rm /tmp/tmp.iLTfIKuomS /tmp/tmp.plpaDNClpW + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.eXeVTifhF4 ++ mktemp + local LAST_ERR=/tmp/tmp.76TmTTxKtw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eXeVTifhF4 + cat /tmp/tmp.76TmTTxKtw Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eXeVTifhF4 + cat /tmp/tmp.76TmTTxKtw Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found 
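For orientation, the retry wrapper whose trace repeats through the rest of this log can be reconstructed roughly as the bash sketch below. This is an inference from the LAST_OUT/LAST_ERR/timeout variables and the sleep 0/4/8 cadence visible in the trace, not the harness's actual kubectl_bin source; the function name retry_kubectl is hypothetical.

    # Hedged reconstruction of the retry loop traced above and below; names and
    # redirections are assumptions, not the harness's real kubectl_bin code.
    retry_kubectl() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break                      # success: stop retrying
            fi
            cat "$LAST_OUT" "$LAST_ERR"    # show the failed attempt's output
            sleep $((timeout * i))         # backoff: 0s, then 4s, then 8s
        done
        cat "$LAST_OUT" "$LAST_ERR"
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }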
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.eXeVTifhF4
+ cat /tmp/tmp.76TmTTxKtw
[... same "Error from server (NotFound)" list as above ...]
+ sleep 8
+ cat /tmp/tmp.eXeVTifhF4
+ cat /tmp/tmp.76TmTTxKtw
[... same "Error from server (NotFound)" list as above ...]
"cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.eXeVTifhF4 /tmp/tmp.76TmTTxKtw + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-incremental-sharded-31890 + rm -rf /tmp/tmp.Ncy5tVg2lu + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.I2t6q0ULv0 + local LAST_OUT=/tmp/tmp.Cl0QQ077fa ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.S2eb9LZjMg + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.7ulHz66rCM + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-incremental-sharded-31890