Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/logs/demand-backup-incremental.log grep: warning: stray \ before - WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1 + create_infra demand-backup-incremental-25297 + local ns=demand-backup-incremental-25297 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.pFx4a2xBER ++ mktemp + local LAST_ERR=/tmp/tmp.gRSm3TaAgu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pFx4a2xBER customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.gRSm3TaAgu + rm /tmp/tmp.pFx4a2xBER /tmp/tmp.gRSm3TaAgu + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-22620 backup-aws-s3 --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-22620 backup-azure-blob --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-22620 backup-gcp-cs --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-22620 backup-minio --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-minio patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-incremental-22620 backup-minio-not-base --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local 
LAST_OUT=/tmp/tmp.0oWdiY2mmB ++ mktemp + local LAST_ERR=/tmp/tmp.5xFLuGffCQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0oWdiY2mmB customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.5xFLuGffCQ + rm /tmp/tmp.0oWdiY2mmB /tmp/tmp.5xFLuGffCQ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.E1WnQQLAhO ++ mktemp + local LAST_ERR=/tmp/tmp.1MiZFDCNl0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E1WnQQLAhO + cat /tmp/tmp.1MiZFDCNl0 + rm /tmp/tmp.E1WnQQLAhO /tmp/tmp.1MiZFDCNl0 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.p9wL2T959u ++ mktemp + local LAST_ERR=/tmp/tmp.ISlAMwedGk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p9wL2T959u customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.ISlAMwedGk + rm /tmp/tmp.p9wL2T959u /tmp/tmp.ISlAMwedGk + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.emmes6Y6x2 ++ mktemp + local LAST_ERR=/tmp/tmp.WOLailLqYa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.emmes6Y6x2 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.WOLailLqYa + rm 
/tmp/tmp.emmes6Y6x2 /tmp/tmp.WOLailLqYa + return 0 + check_crd_for_deletion PR-2009-645e0543 + local git_tag=PR-2009-645e0543 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2009-645e0543/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xIPRl3ifID +++ mktemp ++ local LAST_ERR=/tmp/tmp.zcgwvkVVAP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xIPRl3ifID ++ cat /tmp/tmp.zcgwvkVVAP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xIPRl3ifID ++ cat /tmp/tmp.zcgwvkVVAP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xIPRl3ifID ++ cat /tmp/tmp.zcgwvkVVAP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.xIPRl3ifID ++ cat /tmp/tmp.zcgwvkVVAP Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.xIPRl3ifID /tmp/tmp.zcgwvkVVAP ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get 
clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + awk '{print$1}' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + xargs kubectl delete ns + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.oxgjSv3FpP ++ mktemp + local LAST_OUT=/tmp/tmp.GvN2i5ytMN ++ mktemp + local LAST_ERR=/tmp/tmp.ryyLTdC3oI + local exit_status=0 + local timeout=4 ++ seq 0 2 egrep: warning: egrep is obsolescent; using grep -E + local LAST_ERR=/tmp/tmp.y8EXrogICz + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) ++ seq 0 2 + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oxgjSv3FpP + cat /tmp/tmp.ryyLTdC3oI + rm /tmp/tmp.oxgjSv3FpP /tmp/tmp.ryyLTdC3oI + return 0 namespace "demand-backup-incremental-22620" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GvN2i5ytMN namespace "psmdb-operator" deleted + cat /tmp/tmp.y8EXrogICz + rm /tmp/tmp.GvN2i5ytMN /tmp/tmp.y8EXrogICz + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WIfM0AFdCT ++ mktemp + local LAST_ERR=/tmp/tmp.IlfS6RmuEc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WIfM0AFdCT + cat /tmp/tmp.IlfS6RmuEc + rm /tmp/tmp.WIfM0AFdCT /tmp/tmp.IlfS6RmuEc + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Tsi0fLEcXF ++ mktemp + local LAST_ERR=/tmp/tmp.4iXR5WaZOq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tsi0fLEcXF namespace/psmdb-operator created + cat /tmp/tmp.4iXR5WaZOq + rm /tmp/tmp.Tsi0fLEcXF /tmp/tmp.4iXR5WaZOq + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nu8fQKavGi +++ mktemp ++ local LAST_ERR=/tmp/tmp.3M38QZyFJb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nu8fQKavGi ++ cat /tmp/tmp.3M38QZyFJb ++ rm /tmp/tmp.nu8fQKavGi /tmp/tmp.3M38QZyFJb ++ return 0 + kubectl_bin config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-3-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.sIGszwh1vf ++ mktemp + local LAST_ERR=/tmp/tmp.kGJv167X3f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-3-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sIGszwh1vf Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-3-cluster1" modified. + cat /tmp/tmp.kGJv167X3f + rm /tmp/tmp.sIGszwh1vf /tmp/tmp.kGJv167X3f + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TbHdKSHmIi ++ mktemp + local LAST_ERR=/tmp/tmp.5iGL2TK13C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TbHdKSHmIi customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.5iGL2TK13C + rm /tmp/tmp.TbHdKSHmIi /tmp/tmp.5iGL2TK13C + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.q6dhLpgP7s ++ mktemp + local LAST_ERR=/tmp/tmp.BK63OxUO8K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q6dhLpgP7s clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.BK63OxUO8K + rm /tmp/tmp.q6dhLpgP7s /tmp/tmp.BK63OxUO8K + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2009-645e0543") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Oq17vTMdg7 ++ mktemp + local LAST_ERR=/tmp/tmp.Bma2LsaCAN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Oq17vTMdg7 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.Bma2LsaCAN + rm /tmp/tmp.Oq17vTMdg7 /tmp/tmp.Bma2LsaCAN + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.2DwGOQxXKj +++ mktemp ++ local LAST_ERR=/tmp/tmp.nogagLklsl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2DwGOQxXKj ++ cat /tmp/tmp.nogagLklsl ++ rm /tmp/tmp.2DwGOQxXKj /tmp/tmp.nogagLklsl ++ return 0 + wait_pod percona-server-mongodb-operator-677cf67bbc-2dxdl + local pod=percona-server-mongodb-operator-677cf67bbc-2dxdl + set +o xtrace waiting for pod/percona-server-mongodb-operator-677cf67bbc-2dxdl to be ready.....OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.klzZFpMAUK +++ mktemp ++ local LAST_ERR=/tmp/tmp.gJ1c0DiAAi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.klzZFpMAUK ++ cat /tmp/tmp.gJ1c0DiAAi ++ rm /tmp/tmp.klzZFpMAUK /tmp/tmp.gJ1c0DiAAi ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-677cf67bbc-2dxdl ++ mktemp + local LAST_OUT=/tmp/tmp.HbhbrMyqyf ++ mktemp + local LAST_ERR=/tmp/tmp.FGREqCRCPf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-677cf67bbc-2dxdl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HbhbrMyqyf + cat /tmp/tmp.FGREqCRCPf + rm /tmp/tmp.HbhbrMyqyf /tmp/tmp.FGREqCRCPf + return 0 2025-08-07T13:19:36.426Z INFO setup Manager starting up {"gitCommit": "645e054370b1c94a71b245f25e4838f59389aa71", "gitBranch": "PR-2009-645e0543", "buildTime": "", "goVersion": "go1.24.6", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-incremental-25297 + local namespace=demand-backup-incremental-25297 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-incremental-25297' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-incremental-25297 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-incremental-25297 --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.clOsMiZfoL ++ mktemp + local LAST_OUT=/tmp/tmp.aSgvoR9e8k ++ mktemp + local LAST_ERR=/tmp/tmp.r7pUZb5sif + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.vVdp7aJQBm ++ seq 0 2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-incremental-25297 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.clOsMiZfoL + cat /tmp/tmp.r7pUZb5sif + rm /tmp/tmp.clOsMiZfoL /tmp/tmp.r7pUZb5sif + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aSgvoR9e8k + cat /tmp/tmp.vVdp7aJQBm + rm /tmp/tmp.aSgvoR9e8k /tmp/tmp.vVdp7aJQBm + return 0 + kubectl_bin wait --for=delete namespace demand-backup-incremental-25297 ++ mktemp + local LAST_OUT=/tmp/tmp.2mWpU4ANz2 ++ mktemp + local LAST_ERR=/tmp/tmp.q72SPhUfGE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-incremental-25297 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2mWpU4ANz2 + cat /tmp/tmp.q72SPhUfGE + rm /tmp/tmp.2mWpU4ANz2 /tmp/tmp.q72SPhUfGE + return 0 + desc 'create namespace demand-backup-incremental-25297' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-incremental-25297 
----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-incremental-25297 ++ mktemp + local LAST_OUT=/tmp/tmp.yNKPtOSfuM ++ mktemp + local LAST_ERR=/tmp/tmp.wn6beYBfEp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-incremental-25297 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yNKPtOSfuM namespace/demand-backup-incremental-25297 created + cat /tmp/tmp.wn6beYBfEp + rm /tmp/tmp.yNKPtOSfuM /tmp/tmp.wn6beYBfEp + return 0 + set_kube_ctx demand-backup-incremental-25297 + local namespace=demand-backup-incremental-25297 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.z7fI5BNSxj +++ mktemp ++ local LAST_ERR=/tmp/tmp.awnw7bV1tj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z7fI5BNSxj ++ cat /tmp/tmp.awnw7bV1tj ++ rm /tmp/tmp.z7fI5BNSxj /tmp/tmp.awnw7bV1tj ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-3-cluster1 --namespace=demand-backup-incremental-25297 ++ mktemp + local LAST_OUT=/tmp/tmp.dxntrNFVGE ++ mktemp + local LAST_ERR=/tmp/tmp.PgRF0Hdsgd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-3-cluster1 --namespace=demand-backup-incremental-25297 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dxntrNFVGE Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2009-645e0543-3-cluster1" modified. + cat /tmp/tmp.PgRF0Hdsgd + rm /tmp/tmp.dxntrNFVGE /tmp/tmp.PgRF0Hdsgd + return 0 + deploy_minio + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio NAME: minio-service LAST DEPLOYED: Thu Aug 7 13:19:49 2025 NAMESPACE: demand-backup-incremental-25297 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: 
minio-service.demand-backup-incremental-25297.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-incremental-25297 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-incremental-25297 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-incremental-25297 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-incremental-25297 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7QtHg5weIh +++ mktemp ++ local LAST_ERR=/tmp/tmp.uQuPy6MzrP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7QtHg5weIh ++ cat /tmp/tmp.uQuPy6MzrP ++ rm /tmp/tmp.7QtHg5weIh /tmp/tmp.uQuPy6MzrP ++ return 0 + MINIO_POD=minio-service-86dfccd949-tf6rm + wait_pod minio-service-86dfccd949-tf6rm + local pod=minio-service-86dfccd949-tf6rm + set +o xtrace waiting for pod/minio-service-86dfccd949-tf6rm to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-25297.svc.cluster.local --tcp=9000 ++ mktemp + local LAST_OUT=/tmp/tmp.h0DrDz0t00 ++ mktemp + local LAST_ERR=/tmp/tmp.wC8cBb12LJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-incremental-25297.svc.cluster.local --tcp=9000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.h0DrDz0t00 service/minio-service created + cat /tmp/tmp.wC8cBb12LJ + rm /tmp/tmp.h0DrDz0t00 /tmp/tmp.wC8cBb12LJ + return 0 + create_minio_bucket operator-testing + local bucket=operator-testing + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.el8YAakHhb ++ mktemp + local LAST_ERR=/tmp/tmp.rX92d1E4TI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.el8YAakHhb make_bucket: operator-testing pod "aws-cli" deleted + cat /tmp/tmp.rX92d1E4TI If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-incremental-25297 + rm /tmp/tmp.el8YAakHhb /tmp/tmp.rX92d1E4TI + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.8Q2X7dWp4R ++ mktemp + local LAST_ERR=/tmp/tmp.5lf3ezO0Tx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8Q2X7dWp4R secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.5lf3ezO0Tx + rm /tmp/tmp.8Q2X7dWp4R /tmp/tmp.5lf3ezO0Tx + return 0 + desc 'Testing on not sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- Testing on not sharded cluster ----------------------------------------------------------------------------------- + log 'Creating PSMDB cluster' + set +o xtrace [2025-08-07T13:20:27+0000] Creating PSMDB cluster + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.6XX3QMsDh1 ++ mktemp + local LAST_ERR=/tmp/tmp.pI7CgevDAW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6XX3QMsDh1 secret/some-users created + cat /tmp/tmp.pI7CgevDAW + rm /tmp/tmp.6XX3QMsDh1 /tmp/tmp.pI7CgevDAW + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2009-645e0543"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.EQ1f4GOXsp ++ mktemp + local LAST_ERR=/tmp/tmp.lvCPkIDWhy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.EQ1f4GOXsp perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.lvCPkIDWhy + rm /tmp/tmp.EQ1f4GOXsp /tmp/tmp.lvCPkIDWhy + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4gU48VVbuQ ++ mktemp + local LAST_ERR=/tmp/tmp.Aij2c0Part + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4gU48VVbuQ deployment.apps/psmdb-client created + cat /tmp/tmp.Aij2c0Part + rm /tmp/tmp.4gU48VVbuQ /tmp/tmp.Aij2c0Part + return 0 + log 'check if all pods started' + set +o xtrace [2025-08-07T13:20:30+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fu8GoY6Zu1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jmsx5xOg44 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fu8GoY6Zu1 ++ cat /tmp/tmp.jmsx5xOg44 ++ rm /tmp/tmp.Fu8GoY6Zu1 /tmp/tmp.jmsx5xOg44 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Niol4c4BvK +++ mktemp ++ local LAST_ERR=/tmp/tmp.UXXaLYqvct ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Niol4c4BvK ++ cat /tmp/tmp.UXXaLYqvct ++ rm /tmp/tmp.Niol4c4BvK /tmp/tmp.UXXaLYqvct ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CZugeHODow +++ mktemp ++ local LAST_ERR=/tmp/tmp.PHdII0SwrV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CZugeHODow ++ cat /tmp/tmp.PHdII0SwrV ++ rm /tmp/tmp.CZugeHODow /tmp/tmp.PHdII0SwrV ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness... 
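
Note on the two waits around this point: wait_for_running above blocks on individual pods, while wait_cluster_consistency below polls the custom resource itself. Distilled, the CR polling pattern is roughly the following (a minimal sketch, not the harness's exact code; the resource name some-name, the 7-second sleep, and the 32-retry budget are taken from the trace):

    # Sketch: poll the PerconaServerMongoDB CR until .status.state is "ready".
    retry=0
    until [[ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" == "ready" ]]; do
        sleep 7
        retry=$((retry + 1))
        if [[ ${retry} -ge 32 ]]; then      # wait_time=32 in the trace
            echo "cluster did not become ready in time" >&2
            exit 1
        fi
    done
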
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gjRgsYgUzv +++ mktemp ++ local LAST_ERR=/tmp/tmp.J6THyXkq4z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gjRgsYgUzv ++ cat /tmp/tmp.J6THyXkq4z ++ rm /tmp/tmp.gjRgsYgUzv /tmp/tmp.J6THyXkq4z ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + log 'writing test data' + set +o xtrace [2025-08-07T13:22:12+0000] writing test data + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-25297 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BdcMAxgsdI +++ mktemp ++ local LAST_ERR=/tmp/tmp.ghAwgmxHJb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BdcMAxgsdI ++ cat /tmp/tmp.ghAwgmxHJb ++ rm /tmp/tmp.BdcMAxgsdI /tmp/tmp.ghAwgmxHJb ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Gh6KYIqr9R ++ mktemp + local LAST_ERR=/tmp/tmp.TQImOHSuLy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Gh6KYIqr9R Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("274b654d-9d11-4cec-9f83-e6f94374625d") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.TQImOHSuLy + rm /tmp/tmp.Gh6KYIqr9R 
/tmp/tmp.TQImOHSuLy + return 0 + sleep 1 + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JCaPJyvIPu +++ mktemp ++ local LAST_ERR=/tmp/tmp.UBAkzmGdml ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JCaPJyvIPu ++ cat /tmp/tmp.UBAkzmGdml ++ rm /tmp/tmp.JCaPJyvIPu /tmp/tmp.UBAkzmGdml ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1c0v3l1pJH ++ mktemp + local LAST_ERR=/tmp/tmp.osI16mPARY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1c0v3l1pJH Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("32fe3af0-052a-4a7b-8fc3-489144e286e7") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.osI16mPARY + rm /tmp/tmp.1c0v3l1pJH /tmp/tmp.osI16mPARY + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:22:22+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.lgU1rLtIhe +++ mktemp ++ local LAST_ERR=/tmp/tmp.rHtuSQRWgs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lgU1rLtIhe ++ cat /tmp/tmp.rHtuSQRWgs ++ rm /tmp/tmp.lgU1rLtIhe /tmp/tmp.rHtuSQRWgs ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yWIpRP5VNj ++ mktemp + local LAST_ERR=/tmp/tmp.BTchhGyzEJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yWIpRP5VNj + cat /tmp/tmp.BTchhGyzEJ + rm /tmp/tmp.yWIpRP5VNj /tmp/tmp.BTchhGyzEJ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:22:23+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.c4HpF1ZHSa +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bilxt0plY1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c4HpF1ZHSa ++ cat /tmp/tmp.Bilxt0plY1 ++ rm /tmp/tmp.c4HpF1ZHSa /tmp/tmp.Bilxt0plY1 ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.he16sIIYnC ++ mktemp + local LAST_ERR=/tmp/tmp.DmuhqzWXKj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.he16sIIYnC + cat /tmp/tmp.DmuhqzWXKj + rm /tmp/tmp.he16sIIYnC /tmp/tmp.DmuhqzWXKj + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:22:25+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.fXO5ZFTr41 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PLGkCmHEuE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fXO5ZFTr41 ++ cat /tmp/tmp.PLGkCmHEuE ++ rm /tmp/tmp.fXO5ZFTr41 /tmp/tmp.PLGkCmHEuE ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.YWWY2SDMQe ++ mktemp + local LAST_ERR=/tmp/tmp.ef1FYFogaf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YWWY2SDMQe + cat /tmp/tmp.ef1FYFogaf + rm /tmp/tmp.YWWY2SDMQe /tmp/tmp.ef1FYFogaf + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + log 'running backups' + set +o xtrace [2025-08-07T13:22:26+0000] running backups + '[' -z '' ']' + backup_name_aws=backup-aws-s3 + backup_name_gcp=backup-gcp-cs + backup_name_azure=backup-azure-blob + run_backup aws-s3 backup-aws-s3 + local storage=aws-s3 + local backup_name=backup-aws-s3 + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-aws-s3/' + yq '.spec.type="incremental-base"' + /usr/sbin/sed -e 's/storageName:/storageName: aws-s3/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.YqduzHFh1v ++ mktemp + local LAST_ERR=/tmp/tmp.pIFlRyDUdl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YqduzHFh1v perconaservermongodbbackup.psmdb.percona.com/backup-aws-s3 created + cat /tmp/tmp.pIFlRyDUdl + rm 
/tmp/tmp.YqduzHFh1v /tmp/tmp.pIFlRyDUdl + return 0 + run_backup gcp-cs backup-gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-gcp-cs/' + /usr/sbin/sed -e 's/storageName:/storageName: gcp-cs/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.CuZnq9pug1 ++ mktemp + local LAST_ERR=/tmp/tmp.KIgPhy3tqu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CuZnq9pug1 perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.KIgPhy3tqu + rm /tmp/tmp.CuZnq9pug1 /tmp/tmp.KIgPhy3tqu + return 0 + run_backup azure-blob backup-azure-blob + local storage=azure-blob + local backup_name=backup-azure-blob + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-azure-blob/' + /usr/sbin/sed -e 's/storageName:/storageName: azure-blob/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.fICReAbQvq ++ mktemp + local LAST_ERR=/tmp/tmp.Ll5tblEdZ8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fICReAbQvq perconaservermongodbbackup.psmdb.percona.com/backup-azure-blob created + cat /tmp/tmp.Ll5tblEdZ8 + rm /tmp/tmp.fICReAbQvq /tmp/tmp.Ll5tblEdZ8 + return 0 + wait_backup backup-aws-s3 + local backup_name=backup-aws-s3 + local target_state=ready + set +o xtrace waiting for backup-aws-s3 to reach ready state................. 
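
The run_backup calls traced above build each PerconaServerMongoDBBackup manifest by templating a shared conf/backup.yml: sed fills in the name and storageName, and yq forces .spec.type to "incremental-base" (the first backup per storage is the incremental base). A condensed sketch of that pipeline, with the repo path shortened for readability:

    # Sketch of run_backup's manifest templating (simplified from the trace):
    backup_name=backup-aws-s3
    storage=aws-s3
    yq e2e-tests/demand-backup-incremental/conf/backup.yml \
        | sed -e "s/name:/name: ${backup_name}/" \
        | sed -e "s/storageName:/storageName: ${storage}/" \
        | yq '.spec.type="incremental-base"' \
        | kubectl apply -f -
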
+ check_backup_in_storage backup-aws-s3 s3 rs0 + local backup=backup-aws-s3 + local storage_type=s3 + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=s3.amazonaws.com ++ get_backup_dest backup-aws-s3 ++ local backup_name=backup-aws-s3 ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e5qLIq3ULL +++ mktemp ++ local LAST_ERR=/tmp/tmp.lONcdb0CBy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-aws-s3 -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e5qLIq3ULL ++ cat /tmp/tmp.lONcdb0CBy ++ rm /tmp/tmp.e5qLIq3ULL /tmp/tmp.lONcdb0CBy ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:28Z + [[ s3 == \m\i\n\i\o ]] + local url=https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:28Z/rs0/filelist.pbm + log 'checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:28Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-07T13:22:56+0000] checking if https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:28Z/rs0/filelist.pbm exists + curl --fail --head https://s3.amazonaws.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:28Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 13661 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK x-amz-id-2: SQFM2X58wgtYpiyqT1yRjqScPLv236mFBPJubC9aqpIHzzVGh/0gNzrAAvWog/gkXGKNUfC/b5TjSyCxSZtRf7zQc/q0wvr07TBBzE1JHJo= x-amz-request-id: 6ZVVY3H4NHEXS8TB Date: Thu, 07 Aug 2025 13:22:57 GMT Last-Modified: Thu, 07 Aug 2025 13:22:48 GMT x-amz-expiration: expiry-date="Sat, 09 Aug 2025 00:00:00 GMT", rule-id="1 Days Cleanup" ETag: "bca46c94a6a849a70ea0690a7ee8bc4e" x-amz-server-side-encryption: AES256 Accept-Ranges: bytes Content-Type: application/octet-stream Content-Length: 13661 Server: AmazonS3 + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + local target_state=ready + set +o xtrace waiting for backup-gcp-cs to reach ready state.............. 
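
check_backup_in_storage, shown above for aws-s3 and repeated below for gcs and azure, reads the backup's .status.destination from the psmdb-backup CR, maps the storage type to a public endpoint, and asserts the rs0/filelist.pbm object exists via an HTTP HEAD. Distilled (a sketch of the s3 branch only):

    # Sketch: verify the backup artifact landed in the bucket.
    dest=$(kubectl get psmdb-backup backup-aws-s3 -o jsonpath='{.status.destination}' | sed 's|s3://||')
    url="https://s3.amazonaws.com/${dest}/rs0/filelist.pbm"
    curl --fail --head "${url}"    # --fail makes a non-2xx response exit non-zero and fail the test
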
+ check_backup_in_storage backup-gcp-cs gcs rs0 + local backup=backup-gcp-cs + local storage_type=gcs + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=storage.googleapis.com ++ get_backup_dest backup-gcp-cs ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ local backup_name=backup-gcp-cs ++ kubectl_bin get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' +++ mktemp ++ sed 's|s3://||' ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.kJ5Y4TI4ud +++ mktemp ++ local LAST_ERR=/tmp/tmp.eWNYKCqcZD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kJ5Y4TI4ud ++ cat /tmp/tmp.eWNYKCqcZD ++ rm /tmp/tmp.kJ5Y4TI4ud /tmp/tmp.eWNYKCqcZD ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:55Z + [[ gcs == \m\i\n\i\o ]] + local url=https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:55Z/rs0/filelist.pbm + log 'checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:55Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-07T13:23:17+0000] checking if https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:55Z/rs0/filelist.pbm exists + curl --fail --head https://storage.googleapis.com/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:55Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 13661 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/2 200 content-type: application/octet-stream x-guploader-uploadid: ABgVH8_FX15F6Xpsul8DAa_QaMSeTBVeowMxX456aIiJkOLOoIGd7FexE-YZKuyCJ5QfY_bYY8FP5Vc expires: Thu, 07 Aug 2025 14:23:17 GMT date: Thu, 07 Aug 2025 13:23:17 GMT cache-control: public, max-age=3600 last-modified: Thu, 07 Aug 2025 13:23:11 GMT etag: "656a0c917bcf7e79463b4f062329a366" x-goog-generation: 1754572991662411 x-goog-metageneration: 1 x-goog-stored-content-encoding: identity x-goog-stored-content-length: 13661 x-goog-hash: crc32c=chuAKA== x-goog-hash: md5=ZWoMkXvPfnlGO08GIymjZg== x-goog-expiration: Fri, 08 Aug 2025 13:23:11 GMT x-goog-storage-class: REGIONAL accept-ranges: bytes content-length: 13661 server: UploadServer alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + wait_backup backup-azure-blob + local backup_name=backup-azure-blob + local target_state=ready + set +o xtrace waiting for backup-azure-blob to reach ready state.............. 
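-----------------------------------------------------------------------------------
how get_backup_dest normalizes the destination
-----------------------------------------------------------------------------------
The same filelist.pbm probe runs once per storage; only the endpoint changes (s3.amazonaws.com, storage.googleapis.com, or the Azure blob account). To let one code path serve all three backends, get_backup_dest strips the scheme and account prefixes from the CR's destination. A sketch reconstructed from the sed pipeline visible in the trace:

# Normalize .status.destination to a bare bucket/container path by
# stripping the s3:// or azure:// scheme, the Azure account URL, and a
# trailing .json suffix.
get_backup_dest() {
  local backup_name=$1
  kubectl get psmdb-backup "$backup_name" -o 'jsonpath={.status.destination}' \
    | sed -e 's|s3://||' \
          -e 's|azure://||' \
          -e 's|https://engk8soperators.blob.core.windows.net/||' \
          -e 's/.json$//'
}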
+ check_backup_in_storage backup-azure-blob azure rs0 + local backup=backup-azure-blob + local storage_type=azure + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=engk8soperators.blob.core.windows.net ++ get_backup_dest backup-azure-blob ++ local backup_name=backup-azure-blob ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.84tqntIkT1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.d2yh3EpEaN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-azure-blob -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.84tqntIkT1 ++ cat /tmp/tmp.d2yh3EpEaN ++ rm /tmp/tmp.84tqntIkT1 /tmp/tmp.d2yh3EpEaN ++ return 0 + backup_dest=operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:23:17Z + [[ azure == \m\i\n\i\o ]] + local url=https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:23:17Z/rs0/filelist.pbm + log 'checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:23:17Z/rs0/filelist.pbm exists' + set +o xtrace [2025-08-07T13:23:39+0000] checking if https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:23:17Z/rs0/filelist.pbm exists + curl --fail --head https://engk8soperators.blob.core.windows.net/operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:23:17Z/rs0/filelist.pbm % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 13661 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 HTTP/1.1 200 OK Content-Length: 13661 Content-Type: application/octet-stream Content-MD5: Uf8FNYwazAsIOTJLdTogwA== Last-Modified: Thu, 07 Aug 2025 13:23:31 GMT ETag: 0x8DDD5B59A4B4420 Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 x-ms-request-id: 3673f506-b01e-00a5-629e-078915000000 x-ms-version: 2009-09-19 x-ms-lease-status: unlocked x-ms-blob-type: BlockBlob Date: Thu, 07 Aug 2025 13:23:39 GMT + backup_name_minio=backup-minio + run_backup minio backup-minio + local storage=minio + local backup_name=backup-minio + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Uc61xY0ot1 ++ mktemp + local LAST_ERR=/tmp/tmp.plEmwplhb4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Uc61xY0ot1 perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.plEmwplhb4 + rm /tmp/tmp.Uc61xY0ot1 /tmp/tmp.plEmwplhb4 + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state........ 
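-----------------------------------------------------------------------------------
how the minio branch checks the backup
-----------------------------------------------------------------------------------
minio-service is only reachable inside the cluster, so the check that follows replaces curl with a throwaway aws-cli pod and retries until filelist.pbm shows up in the listing (up to 60 attempts, one second apart, per the retry counter in the trace). A minimal sketch of that probe; the credentials and endpoint are the test fixtures seen above:

# List the object from inside the cluster and grep for it; kubectl run
# --rm deletes the helper pod after every attempt.
backup_dest=$(kubectl get psmdb-backup backup-minio \
  -o 'jsonpath={.status.destination}' | sed 's|s3://||')
retry=0
until kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
        /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key \
        AWS_SECRET_ACCESS_KEY=some-secret-key \
        AWS_DEFAULT_REGION=us-east-1 \
        /usr/bin/aws --endpoint-url http://minio-service:9000 \
        s3 ls "s3://${backup_dest}/rs0/filelist.pbm" | grep filelist.pbm; do
  retry=$((retry + 1))
  if [ "$retry" -ge 60 ]; then
    echo "filelist.pbm never appeared in minio" >&2
    exit 1
  fi
  sleep 1
done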
+ check_backup_in_storage backup-minio minio rs0 + local backup=backup-minio + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c02IHCKfHM +++ mktemp ++ local LAST_ERR=/tmp/tmp.RXO87ThZbJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c02IHCKfHM ++ cat /tmp/tmp.RXO87ThZbJ ++ rm /tmp/tmp.c02IHCKfHM /tmp/tmp.RXO87ThZbJ ++ return 0 + backup_dest=operator-testing/2025-08-07T13:23:40Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:23:40Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.HncwT4Tiyw ++ mktemp + local LAST_ERR=/tmp/tmp.N7e7VmyVDx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:23:40Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HncwT4Tiyw + cat /tmp/tmp.N7e7VmyVDx If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.HncwT4Tiyw /tmp/tmp.N7e7VmyVDx + return 0 + sleep 1 + let retry+=1 + '[' 9 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:23:40Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.8WDJt658DP ++ mktemp + local LAST_ERR=/tmp/tmp.FqbfL13FzJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:23:40Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8WDJt658DP + cat /tmp/tmp.FqbfL13FzJ If you don't see a command prompt, try pressing enter. 
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-incremental-25297 + rm /tmp/tmp.8WDJt658DP /tmp/tmp.FqbfL13FzJ + return 0 2025-08-07 13:23:49 14461 filelist.pbm + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bV5mxxZPCO +++ mktemp ++ local LAST_ERR=/tmp/tmp.4nP7R3mYcR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bV5mxxZPCO ++ cat /tmp/tmp.4nP7R3mYcR ++ rm /tmp/tmp.bV5mxxZPCO /tmp/tmp.4nP7R3mYcR ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IeZ9O8PLe7 ++ mktemp + local LAST_ERR=/tmp/tmp.HMh9kEJ6RG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IeZ9O8PLe7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3402e534-6dea-4d1c-a703-82f06451aa6b") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.HMh9kEJ6RG + rm /tmp/tmp.IeZ9O8PLe7 /tmp/tmp.HMh9kEJ6RG + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:24:12+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.EAkoJ5JJOG +++ mktemp ++ local LAST_ERR=/tmp/tmp.rw645xiRDm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EAkoJ5JJOG ++ cat /tmp/tmp.rw645xiRDm ++ rm /tmp/tmp.EAkoJ5JJOG /tmp/tmp.rw645xiRDm ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RjxpeAVFf3 ++ mktemp + local LAST_ERR=/tmp/tmp.uIjHD5SsD4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RjxpeAVFf3 + cat /tmp/tmp.uIjHD5SsD4 + rm /tmp/tmp.RjxpeAVFf3 /tmp/tmp.uIjHD5SsD4 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.ayVnFSAdut/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:24:14+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.Hxk6ndzZow +++ mktemp ++ local LAST_ERR=/tmp/tmp.zl62Tbl4p7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Hxk6ndzZow ++ cat /tmp/tmp.zl62Tbl4p7 ++ rm /tmp/tmp.Hxk6ndzZow /tmp/tmp.zl62Tbl4p7 ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.phadw2hCca ++ mktemp + local LAST_ERR=/tmp/tmp.M8CFEvMvmt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.phadw2hCca + cat /tmp/tmp.M8CFEvMvmt + rm /tmp/tmp.phadw2hCca /tmp/tmp.M8CFEvMvmt + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.ayVnFSAdut/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:24:15+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.8F0lGMnzqx +++ mktemp ++ local LAST_ERR=/tmp/tmp.KBaeX22qZS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8F0lGMnzqx ++ cat /tmp/tmp.KBaeX22qZS ++ rm /tmp/tmp.8F0lGMnzqx /tmp/tmp.KBaeX22qZS ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1qhiTsjQ5H ++ mktemp + local LAST_ERR=/tmp/tmp.XYnrLRI39w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1qhiTsjQ5H + cat /tmp/tmp.XYnrLRI39w + rm /tmp/tmp.1qhiTsjQ5H /tmp/tmp.XYnrLRI39w + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.ayVnFSAdut/find-not-base + backup_name_minio_not_base=backup-minio-not-base + run_backup minio backup-minio-not-base false + local storage=minio + local backup_name=backup-minio-not-base + local base=false + local backup_type=incremental + [[ false == \t\r\u\e ]] + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-not-base/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.y07yVudafb ++ mktemp + local LAST_ERR=/tmp/tmp.6XaJiyrTxQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y07yVudafb perconaservermongodbbackup.psmdb.percona.com/backup-minio-not-base created + cat /tmp/tmp.6XaJiyrTxQ + rm /tmp/tmp.y07yVudafb /tmp/tmp.6XaJiyrTxQ + return 0 + wait_backup backup-minio-not-base + local backup_name=backup-minio-not-base + 
local target_state=ready + set +o xtrace waiting for backup-minio-not-base to reach ready state........ + check_backup_in_storage backup-minio-not-base minio rs0 + local backup=backup-minio-not-base + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio-not-base ++ local backup_name=backup-minio-not-base ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio-not-base -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e1lWw2w0G0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FYPL0QcKEQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-not-base -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e1lWw2w0G0 ++ cat /tmp/tmp.FYPL0QcKEQ ++ rm /tmp/tmp.e1lWw2w0G0 /tmp/tmp.FYPL0QcKEQ ++ return 0 + backup_dest=operator-testing/2025-08-07T13:24:18Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:24:18Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.Bham6W9wXe ++ mktemp + local LAST_ERR=/tmp/tmp.CkE6l9finl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:24:18Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bham6W9wXe + cat /tmp/tmp.CkE6l9finl If you don't see a command prompt, try pressing enter. 
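-----------------------------------------------------------------------------------
how run_mongo reaches the replica set
-----------------------------------------------------------------------------------
Every read and write in this test goes through the run_mongo helper seen in the traces above: it resolves the psmdb-client pod by label, then pipes the statements into the mongo shell over kubectl exec. A condensed sketch, with quoting simplified relative to the real helper:

# Feed shell statements to mongo inside the client pod. The \n escapes in
# $command are expanded by printf on the remote side.
run_mongo() {
  local command=$1 uri=$2 driver=${3:-mongodb+srv}
  local pod
  pod=$(kubectl get pods --selector=name=psmdb-client \
    -o 'jsonpath={.items[].metadata.name}')
  kubectl exec "$pod" -- bash -c \
    "printf '${command}\n' | mongo ${driver}://${uri}.svc.cluster.local/admin?ssl=false\&replicaSet=rs0"
}

# e.g. the write performed earlier in this log:
# run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-incremental-25297

compare_mongo_cmd then runs db.test.find() against each member (rs0-0 through rs0-2) over the plain mongodb:// driver, scrubs ObjectIds and pod ordinals with sed, and diffs the result against the expected fixture (find-not-base.json above).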
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_demand-backup-incremental-25297 + rm /tmp/tmp.Bham6W9wXe /tmp/tmp.CkE6l9finl + return 0 2025-08-07 13:24:24 13661 filelist.pbm + '[' -z '' ']' + run_restore backup-aws-s3 + local backup_name=backup-aws-s3 + log 'drop collection' + set +o xtrace [2025-08-07T13:24:35+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9uGS8CyI5t +++ mktemp ++ local LAST_ERR=/tmp/tmp.VrfXFcytP4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9uGS8CyI5t ++ cat /tmp/tmp.VrfXFcytP4 ++ rm /tmp/tmp.9uGS8CyI5t /tmp/tmp.VrfXFcytP4 ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.I3KeM0DD6H ++ mktemp + local LAST_ERR=/tmp/tmp.HuPPEefPX2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I3KeM0DD6H Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7ec4501f-028a-40c0-b54e-d4ed25dce3f5") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.HuPPEefPX2 + rm /tmp/tmp.I3KeM0DD6H /tmp/tmp.HuPPEefPX2 + return 0 + log 'check backup and restore -- backup-aws-s3' + set +o xtrace [2025-08-07T13:24:36+0000] check backup and restore -- backup-aws-s3 + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-aws-s3/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-aws-s3/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.tm9zO8KZYY ++ mktemp + local LAST_ERR=/tmp/tmp.bqTIVrBWdo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.tm9zO8KZYY perconaservermongodbrestore.psmdb.percona.com/restore-backup-aws-s3 created + cat /tmp/tmp.bqTIVrBWdo + rm /tmp/tmp.tm9zO8KZYY /tmp/tmp.bqTIVrBWdo + return 0 + run_recovery_check backup-aws-s3 + local backup_name=backup-aws-s3 + local compare_suffix=_restore + local base=true + wait_restore backup-aws-s3 some-name requested 0 3000 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "requested" .OK after 1 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-25297", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.cAWo8wiofq ++ mktemp + local LAST_ERR=/tmp/tmp.HKzjHBVuK5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cAWo8wiofq + cat /tmp/tmp.HKzjHBVuK5 + rm /tmp/tmp.cAWo8wiofq /tmp/tmp.HKzjHBVuK5 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + wait_restore backup-aws-s3 some-name ready 0 1800 + local backup_name=backup-aws-s3 + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-aws-s3 object to be createdOK Waiting psmdb-restore/restore-backup-aws-s3 to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ yq '.metadata.annotations."percona.com/resync-pbm"' ++ kubectl_bin get psmdb some-name -o yaml +++ mktemp ++ local LAST_OUT=/tmp/tmp.qPnZwyc9ig +++ mktemp ++ local LAST_ERR=/tmp/tmp.FiGugO8GtD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qPnZwyc9ig ++ cat /tmp/tmp.FiGugO8GtD ++ rm /tmp/tmp.qPnZwyc9ig /tmp/tmp.FiGugO8GtD ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ADU8t2hOAy +++ mktemp ++ local LAST_ERR=/tmp/tmp.z89S6xMg9f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ADU8t2hOAy ++ cat /tmp/tmp.z89S6xMg9f ++ rm /tmp/tmp.ADU8t2hOAy /tmp/tmp.z89S6xMg9f ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HnDC2In1fK +++ mktemp ++ local LAST_ERR=/tmp/tmp.mAbkb2wvhM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HnDC2In1fK ++ cat /tmp/tmp.mAbkb2wvhM ++ rm /tmp/tmp.HnDC2In1fK /tmp/tmp.mAbkb2wvhM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fOyTQimBRb +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZQleIn38KE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fOyTQimBRb ++ cat /tmp/tmp.ZQleIn38KE ++ rm /tmp/tmp.fOyTQimBRb /tmp/tmp.ZQleIn38KE ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w6FtdBBX6b +++ mktemp ++ local LAST_ERR=/tmp/tmp.OkczMTQKxT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w6FtdBBX6b ++ cat /tmp/tmp.OkczMTQKxT ++ rm /tmp/tmp.w6FtdBBX6b /tmp/tmp.OkczMTQKxT ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4rG58PMYvB +++ mktemp ++ local LAST_ERR=/tmp/tmp.qcWmmAktR5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4rG58PMYvB ++ cat /tmp/tmp.qcWmmAktR5 ++ rm /tmp/tmp.4rG58PMYvB /tmp/tmp.qcWmmAktR5 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lmDiCaUtAD +++ mktemp ++ local LAST_ERR=/tmp/tmp.9C7ukXaBoN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lmDiCaUtAD ++ cat /tmp/tmp.9C7ukXaBoN ++ rm /tmp/tmp.lmDiCaUtAD /tmp/tmp.9C7ukXaBoN ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r67qnnQIeL +++ mktemp ++ local LAST_ERR=/tmp/tmp.uyPwSF0E1d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r67qnnQIeL ++ cat /tmp/tmp.uyPwSF0E1d ++ rm /tmp/tmp.r67qnnQIeL /tmp/tmp.uyPwSF0E1d ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.goe3J9H9xN +++ mktemp ++ local LAST_ERR=/tmp/tmp.0GKXB9TY3W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.goe3J9H9xN ++ cat /tmp/tmp.0GKXB9TY3W ++ rm /tmp/tmp.goe3J9H9xN /tmp/tmp.0GKXB9TY3W ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xv4YP4brHL +++ mktemp ++ local LAST_ERR=/tmp/tmp.dDJ1jxXlYF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Xv4YP4brHL ++ cat /tmp/tmp.dDJ1jxXlYF ++ rm /tmp/tmp.Xv4YP4brHL /tmp/tmp.dDJ1jxXlYF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aK1EC10NSV +++ mktemp ++ local LAST_ERR=/tmp/tmp.roUQoJvZUV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aK1EC10NSV ++ cat /tmp/tmp.roUQoJvZUV ++ rm /tmp/tmp.aK1EC10NSV /tmp/tmp.roUQoJvZUV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qhpSJClaRw +++ mktemp ++ local LAST_ERR=/tmp/tmp.KH5VsUkFcp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qhpSJClaRw ++ cat /tmp/tmp.KH5VsUkFcp ++ rm /tmp/tmp.qhpSJClaRw /tmp/tmp.KH5VsUkFcp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xOSi7nyvSQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.d5DJ0z3ngq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xOSi7nyvSQ ++ cat /tmp/tmp.d5DJ0z3ngq ++ rm /tmp/tmp.xOSi7nyvSQ /tmp/tmp.d5DJ0z3ngq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VtJYiHevoW +++ mktemp ++ local LAST_ERR=/tmp/tmp.axFEzz4jfc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VtJYiHevoW ++ cat /tmp/tmp.axFEzz4jfc ++ rm /tmp/tmp.VtJYiHevoW /tmp/tmp.axFEzz4jfc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.btJOHbGOO4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uhfl045uEG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.btJOHbGOO4 ++ cat /tmp/tmp.uhfl045uEG ++ rm /tmp/tmp.btJOHbGOO4 /tmp/tmp.uhfl045uEG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ievWG74Efx +++ mktemp ++ local LAST_ERR=/tmp/tmp.jwcwuELq6M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ievWG74Efx ++ cat /tmp/tmp.jwcwuELq6M ++ rm /tmp/tmp.ievWG74Efx /tmp/tmp.jwcwuELq6M ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sw1cwx4WKW +++ mktemp ++ local LAST_ERR=/tmp/tmp.FheNoRhw8E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sw1cwx4WKW ++ cat /tmp/tmp.FheNoRhw8E ++ rm /tmp/tmp.sw1cwx4WKW /tmp/tmp.FheNoRhw8E ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:31:13+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.6XVYHcLhn5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.n98YyhKv7n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6XVYHcLhn5 ++ cat /tmp/tmp.n98YyhKv7n ++ rm /tmp/tmp.6XVYHcLhn5 /tmp/tmp.n98YyhKv7n ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.AZYq5O7kVR ++ mktemp + local LAST_ERR=/tmp/tmp.Nchy1lO55W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec 
psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AZYq5O7kVR + cat /tmp/tmp.Nchy1lO55W + rm /tmp/tmp.AZYq5O7kVR /tmp/tmp.Nchy1lO55W + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:31:14+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.A3j3MNRe9V +++ mktemp ++ local LAST_ERR=/tmp/tmp.N3XhMaxCpc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A3j3MNRe9V ++ cat /tmp/tmp.N3XhMaxCpc ++ rm /tmp/tmp.A3j3MNRe9V /tmp/tmp.N3XhMaxCpc ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.L0ToYsOfsK ++ mktemp + local LAST_ERR=/tmp/tmp.bESQrFQjuZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L0ToYsOfsK + cat /tmp/tmp.bESQrFQjuZ + rm /tmp/tmp.L0ToYsOfsK /tmp/tmp.bESQrFQjuZ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local command=find + local 
uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:31:16+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.NOr83p4Feq +++ mktemp ++ local LAST_ERR=/tmp/tmp.uJvLCg5CFB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NOr83p4Feq ++ cat /tmp/tmp.uJvLCg5CFB ++ rm /tmp/tmp.NOr83p4Feq /tmp/tmp.uJvLCg5CFB ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.AkHngm21Xx ++ mktemp + local LAST_ERR=/tmp/tmp.DWcyKPOSZZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AkHngm21Xx + cat /tmp/tmp.DWcyKPOSZZ + rm /tmp/tmp.AkHngm21Xx /tmp/tmp.DWcyKPOSZZ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + run_restore backup-gcp-cs + local backup_name=backup-gcp-cs + log 'drop collection' + set +o xtrace [2025-08-07T13:31:17+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.95rgYGYw7Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.RswNB9pW5G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 
']' ++ break ++ cat /tmp/tmp.95rgYGYw7Q ++ cat /tmp/tmp.RswNB9pW5G ++ rm /tmp/tmp.95rgYGYw7Q /tmp/tmp.RswNB9pW5G ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ncsoz4x1ly ++ mktemp + local LAST_ERR=/tmp/tmp.Ru8lOnMSYh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ncsoz4x1ly Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("65468b2c-2ba2-4c41-a67e-9d3dfd686fad") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.Ru8lOnMSYh + rm /tmp/tmp.ncsoz4x1ly /tmp/tmp.Ru8lOnMSYh + return 0 + log 'check backup and restore -- backup-gcp-cs' + set +o xtrace [2025-08-07T13:31:19+0000] check backup and restore -- backup-gcp-cs + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-gcp-cs/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-gcp-cs/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.B9z7auAOH2 ++ mktemp + local LAST_ERR=/tmp/tmp.J2WXvIqNLY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B9z7auAOH2 perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs created + cat /tmp/tmp.J2WXvIqNLY + rm /tmp/tmp.B9z7auAOH2 /tmp/tmp.J2WXvIqNLY + return 0 + run_recovery_check backup-gcp-cs + local backup_name=backup-gcp-cs + local compare_suffix=_restore + local base=true + wait_restore backup-gcp-cs some-name requested 0 3000 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "requested" ..OK after 2 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + '[' -n '' -a -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-25297", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.RZXtVrzos4 ++ mktemp + local LAST_ERR=/tmp/tmp.KfsM5v34Yn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RZXtVrzos4 + cat /tmp/tmp.KfsM5v34Yn + rm /tmp/tmp.RZXtVrzos4 /tmp/tmp.KfsM5v34Yn + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + wait_restore backup-gcp-cs some-name ready 0 1800 + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be createdOK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TVkuUCN9PW +++ mktemp ++ local LAST_ERR=/tmp/tmp.nXMF7Ae0i8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TVkuUCN9PW ++ cat /tmp/tmp.nXMF7Ae0i8 ++ rm /tmp/tmp.TVkuUCN9PW /tmp/tmp.nXMF7Ae0i8 ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SAeXnk269W +++ mktemp ++ local LAST_ERR=/tmp/tmp.l4XRRU0YhZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SAeXnk269W ++ cat /tmp/tmp.l4XRRU0YhZ ++ rm /tmp/tmp.SAeXnk269W /tmp/tmp.l4XRRU0YhZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ulHLGu8uEo +++ mktemp ++ local LAST_ERR=/tmp/tmp.VsuPBvRseG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ulHLGu8uEo ++ cat /tmp/tmp.VsuPBvRseG ++ rm /tmp/tmp.ulHLGu8uEo /tmp/tmp.VsuPBvRseG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aOFCmeQGKG +++ mktemp ++ local LAST_ERR=/tmp/tmp.XRXipFPpxO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aOFCmeQGKG ++ cat /tmp/tmp.XRXipFPpxO ++ rm /tmp/tmp.aOFCmeQGKG /tmp/tmp.XRXipFPpxO ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U1npRM9ho3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dHPR2q8A19 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U1npRM9ho3 ++ cat /tmp/tmp.dHPR2q8A19 ++ rm /tmp/tmp.U1npRM9ho3 /tmp/tmp.dHPR2q8A19 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pjlMCQHfX3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xaY7ANuL8c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pjlMCQHfX3 ++ cat /tmp/tmp.xaY7ANuL8c ++ rm /tmp/tmp.pjlMCQHfX3 /tmp/tmp.xaY7ANuL8c ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uZ2YH4Oyrj +++ mktemp ++ local LAST_ERR=/tmp/tmp.oXgBJNFs49 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uZ2YH4Oyrj ++ cat /tmp/tmp.oXgBJNFs49 ++ rm /tmp/tmp.uZ2YH4Oyrj /tmp/tmp.oXgBJNFs49 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y5ARyRudWo +++ mktemp ++ local LAST_ERR=/tmp/tmp.1QJ5qGt3bJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y5ARyRudWo ++ cat /tmp/tmp.1QJ5qGt3bJ ++ rm /tmp/tmp.y5ARyRudWo /tmp/tmp.1QJ5qGt3bJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iV9adAQRLk +++ mktemp ++ local LAST_ERR=/tmp/tmp.kzemcA8A5k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iV9adAQRLk ++ cat /tmp/tmp.kzemcA8A5k ++ rm /tmp/tmp.iV9adAQRLk /tmp/tmp.kzemcA8A5k ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XyL7RwdWwc +++ mktemp ++ local LAST_ERR=/tmp/tmp.lERzABIfPX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XyL7RwdWwc ++ cat /tmp/tmp.lERzABIfPX ++ rm /tmp/tmp.XyL7RwdWwc /tmp/tmp.lERzABIfPX ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aHZgNqH7e4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.m3x6iudTF2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aHZgNqH7e4 ++ cat /tmp/tmp.m3x6iudTF2 ++ rm /tmp/tmp.aHZgNqH7e4 /tmp/tmp.m3x6iudTF2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xL2r6Qkssh +++ mktemp ++ local LAST_ERR=/tmp/tmp.swtsgQy0FJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xL2r6Qkssh ++ cat /tmp/tmp.swtsgQy0FJ ++ rm /tmp/tmp.xL2r6Qkssh /tmp/tmp.swtsgQy0FJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jTepr4Zyh5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qP4Qfa88D3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jTepr4Zyh5 ++ cat /tmp/tmp.qP4Qfa88D3 ++ rm /tmp/tmp.jTepr4Zyh5 /tmp/tmp.qP4Qfa88D3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xfobEham6x +++ mktemp ++ local LAST_ERR=/tmp/tmp.SCyWtlchpE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xfobEham6x ++ cat /tmp/tmp.SCyWtlchpE ++ rm /tmp/tmp.xfobEham6x /tmp/tmp.SCyWtlchpE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eh4VM7n4co +++ mktemp ++ local LAST_ERR=/tmp/tmp.HEQYD6Qq0N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eh4VM7n4co ++ cat /tmp/tmp.HEQYD6Qq0N ++ rm /tmp/tmp.eh4VM7n4co /tmp/tmp.HEQYD6Qq0N ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
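
The dotted wait above is a plain poll-and-retry loop on .status.state. Reconstructed as a standalone function; the function name and error message are mine, while the 32-attempt/10-second budget matches the trace:

  wait_cluster_ready() {
      local cluster="$1" retry=0
      # Poll the PSMDB custom resource until the operator reports "ready".
      until [[ "$(kubectl get psmdb "$cluster" -o jsonpath='{.status.state}')" == "ready" ]]; do
          retry=$((retry + 1))
          if [[ $retry -ge 32 ]]; then
              echo "cluster $cluster never reached ready state" >&2
              return 1
          fi
          echo -n .
          sleep 10
      done
  }

  # usage: wait_cluster_ready some-name

The intermediate "error" readings above are tolerated rather than fatal: the loop only compares against "ready", so a transient error or initializing state while pods restart merely burns one retry.
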
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fca6yrE4BT +++ mktemp ++ local LAST_ERR=/tmp/tmp.5Pjmr4Z0vq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fca6yrE4BT ++ cat /tmp/tmp.5Pjmr4Z0vq ++ rm /tmp/tmp.Fca6yrE4BT /tmp/tmp.5Pjmr4Z0vq ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:37:43+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.Wt3YATTdff +++ mktemp ++ local LAST_ERR=/tmp/tmp.QvaCV4m5QD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wt3YATTdff ++ cat /tmp/tmp.QvaCV4m5QD ++ rm /tmp/tmp.Wt3YATTdff /tmp/tmp.QvaCV4m5QD ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.uaHTOQMkji ++ mktemp + local LAST_ERR=/tmp/tmp.Q4BTMVtgoV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uaHTOQMkji + cat /tmp/tmp.Q4BTMVtgoV + rm /tmp/tmp.uaHTOQMkji /tmp/tmp.Q4BTMVtgoV + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:37:45+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.2904TKNpJs +++ mktemp ++ local LAST_ERR=/tmp/tmp.3IDk6kDgnT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2904TKNpJs ++ cat /tmp/tmp.3IDk6kDgnT ++ rm /tmp/tmp.2904TKNpJs /tmp/tmp.3IDk6kDgnT ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.u0DmscKhAJ ++ mktemp + local LAST_ERR=/tmp/tmp.3YqEDcsq5f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.u0DmscKhAJ + cat /tmp/tmp.3YqEDcsq5f + rm /tmp/tmp.u0DmscKhAJ /tmp/tmp.3YqEDcsq5f + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:37:46+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.jplojT0LYJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.30O2QqZxUB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jplojT0LYJ ++ cat /tmp/tmp.30O2QqZxUB ++ rm /tmp/tmp.jplojT0LYJ /tmp/tmp.30O2QqZxUB ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.LCzMLFvkdh ++ mktemp + local LAST_ERR=/tmp/tmp.M359MhbGcC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LCzMLFvkdh + cat /tmp/tmp.M359MhbGcC + rm /tmp/tmp.LCzMLFvkdh /tmp/tmp.M359MhbGcC + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + run_restore backup-azure-blob + local backup_name=backup-azure-blob + log 'drop collection' + set +o xtrace [2025-08-07T13:37:48+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WlVsy2YyKp +++ mktemp ++ local LAST_ERR=/tmp/tmp.enTngNNb3A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WlVsy2YyKp ++ cat /tmp/tmp.enTngNNb3A ++ rm /tmp/tmp.WlVsy2YyKp /tmp/tmp.enTngNNb3A ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vp2xlIhXsc ++ mktemp + local LAST_ERR=/tmp/tmp.88xnOw1d0M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vp2xlIhXsc Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("593025b1-dffc-4485-8c43-53c7c964d1ed") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.88xnOw1d0M + rm /tmp/tmp.vp2xlIhXsc /tmp/tmp.88xnOw1d0M + return 0 + log 'check backup and restore -- backup-azure-blob' + set +o xtrace [2025-08-07T13:37:50+0000] check backup and restore -- backup-azure-blob + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-azure-blob/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-azure-blob/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OkfIz8p0we ++ mktemp + local LAST_ERR=/tmp/tmp.J2cNYSveoL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OkfIz8p0we perconaservermongodbrestore.psmdb.percona.com/restore-backup-azure-blob created + cat /tmp/tmp.J2cNYSveoL + rm /tmp/tmp.OkfIz8p0we /tmp/tmp.J2cNYSveoL + return 0 + run_recovery_check backup-azure-blob + local backup_name=backup-azure-blob + local compare_suffix=_restore + local base=true + wait_restore backup-azure-blob some-name requested 0 3000 + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob to reach state "requested" ..OK after 2 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + 
yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-25297", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(.
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.tVx0NXYSaz ++ mktemp + local LAST_ERR=/tmp/tmp.yqUv0T4jAo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tVx0NXYSaz + cat /tmp/tmp.yqUv0T4jAo + rm /tmp/tmp.tVx0NXYSaz /tmp/tmp.yqUv0T4jAo + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + wait_restore backup-azure-blob some-name ready 0 1800 + local backup_name=backup-azure-blob + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-azure-blob object to be createdOK Waiting psmdb-restore/restore-backup-azure-blob to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3fS5acM2Y1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bLrkNMq4Dw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3fS5acM2Y1 ++ cat /tmp/tmp.bLrkNMq4Dw ++ rm /tmp/tmp.3fS5acM2Y1 /tmp/tmp.bLrkNMq4Dw ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zH07Sw99Ng +++ mktemp ++ local LAST_ERR=/tmp/tmp.CeucGiNd1b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zH07Sw99Ng ++ cat /tmp/tmp.CeucGiNd1b ++ rm /tmp/tmp.zH07Sw99Ng /tmp/tmp.CeucGiNd1b ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cwvtaBT9KD +++ mktemp ++ local LAST_ERR=/tmp/tmp.kJrW7hsRTD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cwvtaBT9KD ++ cat /tmp/tmp.kJrW7hsRTD ++ rm /tmp/tmp.cwvtaBT9KD /tmp/tmp.kJrW7hsRTD ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oDuSbfNh2m +++ mktemp ++ local LAST_ERR=/tmp/tmp.sXqZrBJc6k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oDuSbfNh2m ++ cat /tmp/tmp.sXqZrBJc6k ++ rm /tmp/tmp.oDuSbfNh2m /tmp/tmp.sXqZrBJc6k ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0pohHuquFl +++ mktemp ++ local LAST_ERR=/tmp/tmp.pq2uAHPZoy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0pohHuquFl ++ cat /tmp/tmp.pq2uAHPZoy ++ rm /tmp/tmp.0pohHuquFl /tmp/tmp.pq2uAHPZoy ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zSNJLh4com +++ mktemp ++ local LAST_ERR=/tmp/tmp.QpxHAPWrJN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zSNJLh4com ++ cat /tmp/tmp.QpxHAPWrJN ++ rm /tmp/tmp.zSNJLh4com /tmp/tmp.QpxHAPWrJN ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h2GE5XbeeZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.YxJqVu44Bi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h2GE5XbeeZ ++ cat /tmp/tmp.YxJqVu44Bi ++ rm /tmp/tmp.h2GE5XbeeZ /tmp/tmp.YxJqVu44Bi ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZZtmC2o44d +++ mktemp ++ local LAST_ERR=/tmp/tmp.YbHplSGu7M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZZtmC2o44d ++ cat /tmp/tmp.YbHplSGu7M ++ rm /tmp/tmp.ZZtmC2o44d /tmp/tmp.YbHplSGu7M ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5qz1CQetpG +++ mktemp ++ local LAST_ERR=/tmp/tmp.VA3Fh3rrUJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5qz1CQetpG ++ cat /tmp/tmp.VA3Fh3rrUJ ++ rm /tmp/tmp.5qz1CQetpG /tmp/tmp.VA3Fh3rrUJ ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YM6egD1GHa +++ mktemp ++ local LAST_ERR=/tmp/tmp.j9PkoMiyf9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YM6egD1GHa ++ cat /tmp/tmp.j9PkoMiyf9 ++ rm /tmp/tmp.YM6egD1GHa /tmp/tmp.j9PkoMiyf9 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W8WWjc7zkO +++ mktemp ++ local LAST_ERR=/tmp/tmp.f5DS51gOw6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W8WWjc7zkO ++ cat /tmp/tmp.f5DS51gOw6 ++ rm /tmp/tmp.W8WWjc7zkO /tmp/tmp.f5DS51gOw6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p6tZT8IZQD +++ mktemp ++ local LAST_ERR=/tmp/tmp.XQ5ghAuKyF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p6tZT8IZQD ++ cat /tmp/tmp.XQ5ghAuKyF ++ rm /tmp/tmp.p6tZT8IZQD /tmp/tmp.XQ5ghAuKyF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FFkAU8TENx +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nf5y3s8QjM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FFkAU8TENx ++ cat /tmp/tmp.Nf5y3s8QjM ++ rm /tmp/tmp.FFkAU8TENx /tmp/tmp.Nf5y3s8QjM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v59Bo70qLn +++ mktemp ++ local LAST_ERR=/tmp/tmp.zA42GwtFok ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v59Bo70qLn ++ cat /tmp/tmp.zA42GwtFok ++ rm /tmp/tmp.v59Bo70qLn /tmp/tmp.zA42GwtFok ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GsPaMgMVyK +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rw7YXJ1WNV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GsPaMgMVyK ++ cat /tmp/tmp.Rw7YXJ1WNV ++ rm /tmp/tmp.GsPaMgMVyK /tmp/tmp.Rw7YXJ1WNV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
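
The compare_mongo_cmd calls surrounding these waits all follow one recipe: run the query through the client pod, filter out shell chatter and volatile identifiers, then diff against a stored result. A condensed sketch (pod name and namespace copied from this run; output paths assumed; the filter alternation shortened), using grep -E since the log itself warns that egrep is obsolescent:

  ns="demand-backup-incremental-25297"
  client_pod="psmdb-client-b9788d8bc-s6xf5"
  uri="mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.${ns}.svc.cluster.local/admin?ssl=false&replicaSet=rs0"

  kubectl exec "$client_pod" -- bash -c \
      "printf 'use myApp\n db.test.find()\n' | mongo '$uri'" \
    | grep -Ev 'NETWORK|Implicit session:|Percona Server for MongoDB|connecting to:|versions do not match' \
    | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+\.svc/-xxx.svc/' \
    > /tmp/find
  diff -u compare/find.json /tmp/find

The sed pass strips ObjectIds and normalizes the numbered namespace suffix, so the same golden find.json works for every replica and every run.
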
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gk0JLVmf9A +++ mktemp ++ local LAST_ERR=/tmp/tmp.05mSk6U9YN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gk0JLVmf9A ++ cat /tmp/tmp.05mSk6U9YN ++ rm /tmp/tmp.Gk0JLVmf9A /tmp/tmp.05mSk6U9YN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WoHLL1tM3o +++ mktemp ++ local LAST_ERR=/tmp/tmp.6H1yBGA4Dy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WoHLL1tM3o ++ cat /tmp/tmp.6H1yBGA4Dy ++ rm /tmp/tmp.WoHLL1tM3o /tmp/tmp.6H1yBGA4Dy ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:44:28+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.KFYBPTqaQS +++ mktemp ++ local LAST_ERR=/tmp/tmp.V4bdqy43ev ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KFYBPTqaQS ++ cat /tmp/tmp.V4bdqy43ev ++ rm /tmp/tmp.KFYBPTqaQS /tmp/tmp.V4bdqy43ev ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wjYKsYvp7T ++ mktemp + local LAST_ERR=/tmp/tmp.63GI4SE6Oj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec 
psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wjYKsYvp7T + cat /tmp/tmp.63GI4SE6Oj + rm /tmp/tmp.wjYKsYvp7T /tmp/tmp.63GI4SE6Oj + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:44:30+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.Bf55LiT2iO +++ mktemp ++ local LAST_ERR=/tmp/tmp.RBukbWamET ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bf55LiT2iO ++ cat /tmp/tmp.RBukbWamET ++ rm /tmp/tmp.Bf55LiT2iO /tmp/tmp.RBukbWamET ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kjqBmn4b1f ++ mktemp + local LAST_ERR=/tmp/tmp.rKMPqQdaZw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kjqBmn4b1f + cat /tmp/tmp.rKMPqQdaZw + rm /tmp/tmp.kjqBmn4b1f /tmp/tmp.rKMPqQdaZw + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local command=find + local 
uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:44:31+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.0qZI4WZNzr +++ mktemp ++ local LAST_ERR=/tmp/tmp.4LlGrQwUgq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0qZI4WZNzr ++ cat /tmp/tmp.4LlGrQwUgq ++ rm /tmp/tmp.0qZI4WZNzr /tmp/tmp.4LlGrQwUgq ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ke9OJKzo1N ++ mktemp + local LAST_ERR=/tmp/tmp.1YwJJGMipt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ke9OJKzo1N + cat /tmp/tmp.1YwJJGMipt + rm /tmp/tmp.ke9OJKzo1N /tmp/tmp.1YwJJGMipt + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + run_restore backup-minio-not-base + local backup_name=backup-minio-not-base + log 'drop collection' + set +o xtrace [2025-08-07T13:44:33+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JQJuS2RNTm +++ mktemp ++ local LAST_ERR=/tmp/tmp.y8xzyEmTeG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JQJuS2RNTm ++ cat /tmp/tmp.y8xzyEmTeG ++ rm /tmp/tmp.JQJuS2RNTm /tmp/tmp.y8xzyEmTeG ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1g0s5zgYgW ++ mktemp + local LAST_ERR=/tmp/tmp.zSDmBkPbH6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1g0s5zgYgW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ac54fe5c-0328-42e9-9496-7e937e74c7d3") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.zSDmBkPbH6 + rm /tmp/tmp.1g0s5zgYgW /tmp/tmp.zSDmBkPbH6 + return 0 + log 'check backup and restore -- backup-minio-not-base' + set +o xtrace [2025-08-07T13:44:35+0000] check backup and restore -- backup-minio-not-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-not-base/' + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-not-base/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KA3hTt8run ++ mktemp + local LAST_ERR=/tmp/tmp.n9CyKBKsAp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KA3hTt8run perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-not-base created + cat /tmp/tmp.n9CyKBKsAp + rm /tmp/tmp.KA3hTt8run /tmp/tmp.n9CyKBKsAp + return 0 + run_recovery_check backup-minio-not-base '' false + local backup_name=backup-minio-not-base + local compare_suffix=_restore + local base=false + wait_restore backup-minio-not-base some-name requested 0 3000 + local backup_name=backup-minio-not-base + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base to reach state "requested" ..OK after 2 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local 
new_result=/tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/some-name-rs0
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion) |
  del(.metadata.managedFields) |
  del(.. | select(has("creationTimestamp")).creationTimestamp) |
  del(.. | select(has("namespace")).namespace) |
  del(.. | select(has("uid")).uid) |
  del(.metadata.resourceVersion) |
  del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
  del(.metadata.selfLink) |
  del(.metadata.annotations."cloud.google.com/neg") |
  del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
  del(.. | select(has("image")).image) |
  del(.. | select(has("clusterIP")).clusterIP) |
  del(.. | select(has("clusterIPs")).clusterIPs) |
  del(.. | select(has("dataSource")).dataSource) |
  del(.. | select(has("procMount")).procMount) |
  del(.. | select(has("storageClassName")).storageClassName) |
  del(.. | select(has("finalizers")).finalizers) |
  del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
  del(.. | select(has("volumeName")).volumeName) |
  del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
  del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
  del(.spec.volumeMode) |
  del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
  del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
  del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
  del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
  del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
  del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
  del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
  del(.. | select(has("nodePort")).nodePort) |
  del(.status) |
  (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-25297", "NAME_SPACE") |
  del(.spec.volumeClaimTemplates[].apiVersion) |
  del(.spec.volumeClaimTemplates[].kind) |
  del(.spec.ipFamilies) |
  del(.spec.ipFamilyPolicy) |
  (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
  (.. | select(.
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.dLv56axhGu ++ mktemp + local LAST_ERR=/tmp/tmp.Kf1JQjsxjH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dLv56axhGu + cat /tmp/tmp.Kf1JQjsxjH + rm /tmp/tmp.dLv56axhGu /tmp/tmp.Kf1JQjsxjH + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + wait_restore backup-minio-not-base some-name ready 0 1800 + local backup_name=backup-minio-not-base + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-not-base object to be createdOK Waiting psmdb-restore/restore-backup-minio-not-base to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JpLyhxN9Ov +++ mktemp ++ local LAST_ERR=/tmp/tmp.evYLpbCMYM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JpLyhxN9Ov ++ cat /tmp/tmp.evYLpbCMYM ++ rm /tmp/tmp.JpLyhxN9Ov /tmp/tmp.evYLpbCMYM ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kCkUa5k1P3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PmsG1MNPx0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kCkUa5k1P3 ++ cat /tmp/tmp.PmsG1MNPx0 ++ rm /tmp/tmp.kCkUa5k1P3 /tmp/tmp.PmsG1MNPx0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0aolbHa7aU +++ mktemp ++ local LAST_ERR=/tmp/tmp.UJVGgsay1a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0aolbHa7aU ++ cat /tmp/tmp.UJVGgsay1a ++ rm /tmp/tmp.0aolbHa7aU /tmp/tmp.UJVGgsay1a ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uOTsMgVw6Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.wgQyT91NOW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uOTsMgVw6Z ++ cat /tmp/tmp.wgQyT91NOW ++ rm /tmp/tmp.uOTsMgVw6Z /tmp/tmp.wgQyT91NOW ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EjtwH4M925 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uTptR3oz3H ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EjtwH4M925 ++ cat /tmp/tmp.uTptR3oz3H ++ rm /tmp/tmp.EjtwH4M925 /tmp/tmp.uTptR3oz3H ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wTYdv4clNL +++ mktemp ++ local LAST_ERR=/tmp/tmp.KcABgSjXXF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wTYdv4clNL ++ cat /tmp/tmp.KcABgSjXXF ++ rm /tmp/tmp.wTYdv4clNL /tmp/tmp.KcABgSjXXF ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z73tc6CotZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.5pB3DvyGdg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z73tc6CotZ ++ cat /tmp/tmp.5pB3DvyGdg ++ rm /tmp/tmp.Z73tc6CotZ /tmp/tmp.5pB3DvyGdg ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HyNktuDJF2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.osXuUIEvRv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HyNktuDJF2 ++ cat /tmp/tmp.osXuUIEvRv ++ rm /tmp/tmp.HyNktuDJF2 /tmp/tmp.osXuUIEvRv ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Snh79qeGxw +++ mktemp ++ local LAST_ERR=/tmp/tmp.lDt7BkiDFh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Snh79qeGxw ++ cat /tmp/tmp.lDt7BkiDFh ++ rm /tmp/tmp.Snh79qeGxw /tmp/tmp.lDt7BkiDFh ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
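The probes unrolled above and below are wait_cluster_consistency at work: every ten seconds it reads .status.state from the psmdb object, and it only gives up after 32 attempts, which lets it ride out the intermediate "error" states a physical restore produces while mongod pods restart. Condensed into loop form (a reconstruction from the trace, not the verbatim helper):

    # Condensed wait_cluster_consistency, as implied by the unrolled trace.
    wait_cluster_consistency() {
        local cluster_name=$1 wait_time=${2:-32} retry=0
        sleep 7
        echo -n 'waiting for cluster readiness'
        until [[ $(kubectl_bin get psmdb "$cluster_name" \
                -o 'jsonpath={.status.state}') == "ready" ]]; do
            retry=$((retry + 1))
            if [ "$retry" -ge "$wait_time" ]; then
                echo "cluster $cluster_name never reached ready state" >&2
                return 1
            fi
            echo -n .
            sleep 10
        done
        echo
    }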
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WkCPRDGu6t +++ mktemp ++ local LAST_ERR=/tmp/tmp.gg0KedFGTB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WkCPRDGu6t ++ cat /tmp/tmp.gg0KedFGTB ++ rm /tmp/tmp.WkCPRDGu6t /tmp/tmp.gg0KedFGTB ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.95Zt3sTidd +++ mktemp ++ local LAST_ERR=/tmp/tmp.DpPRhh27RE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.95Zt3sTidd ++ cat /tmp/tmp.DpPRhh27RE ++ rm /tmp/tmp.95Zt3sTidd /tmp/tmp.DpPRhh27RE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CKSEUkcH6A +++ mktemp ++ local LAST_ERR=/tmp/tmp.6NIeauUM7E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CKSEUkcH6A ++ cat /tmp/tmp.6NIeauUM7E ++ rm /tmp/tmp.CKSEUkcH6A /tmp/tmp.6NIeauUM7E ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A1qD4p4b5t +++ mktemp ++ local LAST_ERR=/tmp/tmp.PiFp5uyYnw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A1qD4p4b5t ++ cat /tmp/tmp.PiFp5uyYnw ++ rm /tmp/tmp.A1qD4p4b5t /tmp/tmp.PiFp5uyYnw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B99SP8dx8P +++ mktemp ++ local LAST_ERR=/tmp/tmp.Il5lf5eYnF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B99SP8dx8P ++ cat /tmp/tmp.Il5lf5eYnF ++ rm /tmp/tmp.B99SP8dx8P /tmp/tmp.Il5lf5eYnF ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ false == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:50:32+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.0DyGy3amID +++ mktemp ++ local LAST_ERR=/tmp/tmp.fnFYW4g7ci ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0DyGy3amID ++ cat /tmp/tmp.fnFYW4g7ci ++ rm /tmp/tmp.0DyGy3amID /tmp/tmp.fnFYW4g7ci ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.klZTkmfZi8 ++ mktemp + local LAST_ERR=/tmp/tmp.h9iq8gsyYR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.klZTkmfZi8 + cat /tmp/tmp.h9iq8gsyYR + rm /tmp/tmp.klZTkmfZi8 /tmp/tmp.h9iq8gsyYR + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.ayVnFSAdut/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:50:33+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.S0noBvE4KE egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.YdxZmqvOJp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.S0noBvE4KE ++ cat /tmp/tmp.YdxZmqvOJp ++ rm /tmp/tmp.S0noBvE4KE /tmp/tmp.YdxZmqvOJp ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gJ4oIY9AU9 ++ mktemp + local LAST_ERR=/tmp/tmp.fWLDrZQ8Jh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gJ4oIY9AU9 + cat /tmp/tmp.fWLDrZQ8Jh + rm /tmp/tmp.gJ4oIY9AU9 /tmp/tmp.fWLDrZQ8Jh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.ayVnFSAdut/find-not-base + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 -not-base + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix=-not-base + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:50:35+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BuSkhJXn7Q egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.upoPnC681T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BuSkhJXn7Q ++ cat /tmp/tmp.upoPnC681T ++ rm /tmp/tmp.BuSkhJXn7Q /tmp/tmp.upoPnC681T ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.MAZkDlFkcN ++ mktemp + local LAST_ERR=/tmp/tmp.J5TpC27Jan + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MAZkDlFkcN + cat /tmp/tmp.J5TpC27Jan + rm /tmp/tmp.MAZkDlFkcN /tmp/tmp.J5TpC27Jan + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find-not-base.json /tmp/tmp.ayVnFSAdut/find-not-base + run_restore backup-minio + local backup_name=backup-minio + log 'drop collection' + set +o xtrace [2025-08-07T13:50:36+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1eCGObJTI9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fvfU7wjnl4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1eCGObJTI9 ++ cat /tmp/tmp.fvfU7wjnl4 ++ rm /tmp/tmp.1eCGObJTI9 /tmp/tmp.fvfU7wjnl4 ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ 
myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.W4HAbcVaao ++ mktemp + local LAST_ERR=/tmp/tmp.uvnrKIsuxC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W4HAbcVaao Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("92dc6943-5075-461c-a6da-1394c6480a94") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.uvnrKIsuxC + rm /tmp/tmp.W4HAbcVaao /tmp/tmp.uvnrKIsuxC + return 0 + log 'check backup and restore -- backup-minio' + set +o xtrace [2025-08-07T13:50:38+0000] check backup and restore -- backup-minio + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.q2T4qMhtYV ++ mktemp + local LAST_ERR=/tmp/tmp.MAOIrm8XiW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q2T4qMhtYV perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.MAOIrm8XiW + rm /tmp/tmp.q2T4qMhtYV /tmp/tmp.MAOIrm8XiW + return 0 + run_recovery_check backup-minio + local backup_name=backup-minio + local compare_suffix=_restore + local base=true + wait_restore backup-minio some-name requested 0 3000 + local backup_name=backup-minio + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be createdOK Waiting psmdb-restore/restore-backup-minio to reach state "requested" ...OK after 3 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore + local resource=statefulset/some-name-rs0 + local postfix=_restore + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml + local new_result=/tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) 
| del(.metadata.managedFields)
  | del(.. | select(has("creationTimestamp")).creationTimestamp)
  | del(.. | select(has("namespace")).namespace)
  | del(.. | select(has("uid")).uid)
  | del(.metadata.resourceVersion)
  | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE"))
  | del(.metadata.selfLink)
  | del(.metadata.annotations."cloud.google.com/neg")
  | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration")
  | del(.. | select(has("image")).image)
  | del(.. | select(has("clusterIP")).clusterIP)
  | del(.. | select(has("clusterIPs")).clusterIPs)
  | del(.. | select(has("dataSource")).dataSource)
  | del(.. | select(has("procMount")).procMount)
  | del(.. | select(has("storageClassName")).storageClassName)
  | del(.. | select(has("finalizers")).finalizers)
  | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection")
  | del(.. | select(has("volumeName")).volumeName)
  | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner")
  | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner")
  | del(.spec.volumeMode)
  | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node")
  | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash")
  | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash")
  | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash")
  | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash")
  | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem"))
  | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort)
  | del(.. | select(has("nodePort")).nodePort)
  | del(.status)
  | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-25297", "NAME_SPACE")
  | del(.spec.volumeClaimTemplates[].apiVersion)
  | del(.spec.volumeClaimTemplates[].kind)
  | del(.spec.ipFamilies)
  | del(.spec.ipFamilyPolicy)
  | (.. | select(. == "extensions/v1beta1")) = "apps/v1"
  | (.. | select(.
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Bghk0pTCzw ++ mktemp + local LAST_ERR=/tmp/tmp.5gaVTFdQWI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bghk0pTCzw + cat /tmp/tmp.5gaVTFdQWI + rm /tmp/tmp.Bghk0pTCzw /tmp/tmp.5gaVTFdQWI + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore.yml /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + wait_restore backup-minio some-name ready 0 1800 + local backup_name=backup-minio + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be createdOK Waiting psmdb-restore/restore-backup-minio to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MWaSw07deK +++ mktemp ++ local LAST_ERR=/tmp/tmp.1jTDAyutoA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MWaSw07deK ++ cat /tmp/tmp.1jTDAyutoA ++ rm /tmp/tmp.MWaSw07deK /tmp/tmp.1jTDAyutoA ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.woRLFjcvEH +++ mktemp ++ local LAST_ERR=/tmp/tmp.ooxm5NrTYh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.woRLFjcvEH ++ cat /tmp/tmp.ooxm5NrTYh ++ rm /tmp/tmp.woRLFjcvEH /tmp/tmp.ooxm5NrTYh ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GWDl80IcEa +++ mktemp ++ local LAST_ERR=/tmp/tmp.4FbDMllyBV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GWDl80IcEa ++ cat /tmp/tmp.4FbDMllyBV ++ rm /tmp/tmp.GWDl80IcEa /tmp/tmp.4FbDMllyBV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rtlNoeKSxO +++ mktemp ++ local LAST_ERR=/tmp/tmp.b8u6DYKyXd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rtlNoeKSxO ++ cat /tmp/tmp.b8u6DYKyXd ++ rm /tmp/tmp.rtlNoeKSxO /tmp/tmp.b8u6DYKyXd ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TBq7ym8OGL +++ mktemp ++ local LAST_ERR=/tmp/tmp.pL8bUuzc64 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TBq7ym8OGL ++ cat /tmp/tmp.pL8bUuzc64 ++ rm /tmp/tmp.TBq7ym8OGL /tmp/tmp.pL8bUuzc64 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mRC4KbzXN1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.29riUGvjuY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mRC4KbzXN1 ++ cat /tmp/tmp.29riUGvjuY ++ rm /tmp/tmp.mRC4KbzXN1 /tmp/tmp.29riUGvjuY ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fuI0JyZI9e +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZdsThfzqDc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fuI0JyZI9e ++ cat /tmp/tmp.ZdsThfzqDc ++ rm /tmp/tmp.fuI0JyZI9e /tmp/tmp.ZdsThfzqDc ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QUu2Jp1awa +++ mktemp ++ local LAST_ERR=/tmp/tmp.eF6AjLcEdR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QUu2Jp1awa ++ cat /tmp/tmp.eF6AjLcEdR ++ rm /tmp/tmp.QUu2Jp1awa /tmp/tmp.eF6AjLcEdR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qHLIebIfTo +++ mktemp ++ local LAST_ERR=/tmp/tmp.LkS9QL2oqy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qHLIebIfTo ++ cat /tmp/tmp.LkS9QL2oqy ++ rm /tmp/tmp.qHLIebIfTo /tmp/tmp.LkS9QL2oqy ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GCVqzyqRMs +++ mktemp ++ local LAST_ERR=/tmp/tmp.35lgT7mRMI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GCVqzyqRMs ++ cat /tmp/tmp.35lgT7mRMI ++ rm /tmp/tmp.GCVqzyqRMs /tmp/tmp.35lgT7mRMI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tS3xqmGGOi +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Py1FmmlAZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tS3xqmGGOi ++ cat /tmp/tmp.1Py1FmmlAZ ++ rm /tmp/tmp.tS3xqmGGOi /tmp/tmp.1Py1FmmlAZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pWHsrvVWcD +++ mktemp ++ local LAST_ERR=/tmp/tmp.P4LHcsu4aD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pWHsrvVWcD ++ cat /tmp/tmp.P4LHcsu4aD ++ rm /tmp/tmp.pWHsrvVWcD /tmp/tmp.P4LHcsu4aD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yodPOJFtl0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.INGkSMyXCr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yodPOJFtl0 ++ cat /tmp/tmp.INGkSMyXCr ++ rm /tmp/tmp.yodPOJFtl0 /tmp/tmp.INGkSMyXCr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9LOPA76vRE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xo0i2ELDUx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9LOPA76vRE ++ cat /tmp/tmp.Xo0i2ELDUx ++ rm /tmp/tmp.9LOPA76vRE /tmp/tmp.Xo0i2ELDUx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IemnfOSDbm +++ mktemp ++ local LAST_ERR=/tmp/tmp.FAUfSPspAT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IemnfOSDbm ++ cat /tmp/tmp.FAUfSPspAT ++ rm /tmp/tmp.IemnfOSDbm /tmp/tmp.FAUfSPspAT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
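The restore object that kicked off this recovery check was produced the same way every restore in this test is: run_restore templates the shared restore.yml, whose name: and backupName: fields ship empty, through two sed edits and applies the result. A sketch matching the traced commands (repo-relative path assumed):

    # Sketch of run_restore's templating step, per the trace.
    backup_name=backup-minio
    yq e2e-tests/demand-backup-incremental/conf/restore.yml \
        | sed -e "s/name:/name: restore-${backup_name}/" \
        | sed -e "s/backupName:/backupName: ${backup_name}/" \
        | kubectl apply -f -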
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ilMp263w2t +++ mktemp ++ local LAST_ERR=/tmp/tmp.IgkszIP5ey ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ilMp263w2t ++ cat /tmp/tmp.IgkszIP5ey ++ rm /tmp/tmp.ilMp263w2t /tmp/tmp.IgkszIP5ey ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish. + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:58:46+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P8LzuTZH6q egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.j4W9Ah6fkc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P8LzuTZH6q ++ cat /tmp/tmp.j4W9Ah6fkc ++ rm /tmp/tmp.P8LzuTZH6q /tmp/tmp.j4W9Ah6fkc ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.67frBhjBnI ++ mktemp + local LAST_ERR=/tmp/tmp.UVtzqZtB6s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.67frBhjBnI + cat /tmp/tmp.UVtzqZtB6s + rm /tmp/tmp.67frBhjBnI /tmp/tmp.UVtzqZtB6s + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! -z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:58:47+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.IdWB3Hf5Ce +++ mktemp ++ local LAST_ERR=/tmp/tmp.215rdVRaKW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IdWB3Hf5Ce ++ cat /tmp/tmp.215rdVRaKW ++ rm /tmp/tmp.IdWB3Hf5Ce /tmp/tmp.215rdVRaKW ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NKEsBUBO6i ++ mktemp + local LAST_ERR=/tmp/tmp.FnvNV0bwsU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NKEsBUBO6i + cat /tmp/tmp.FnvNV0bwsU + rm /tmp/tmp.NKEsBUBO6i /tmp/tmp.FnvNV0bwsU + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T13:58:49+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2bqN8cTCfC egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.pQSBfJ7uBw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2bqN8cTCfC ++ cat /tmp/tmp.pQSBfJ7uBw ++ rm /tmp/tmp.2bqN8cTCfC /tmp/tmp.pQSBfJ7uBw ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wcqknpXuif ++ mktemp + local LAST_ERR=/tmp/tmp.ccsckQsSMa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wcqknpXuif + cat /tmp/tmp.ccsckQsSMa + rm /tmp/tmp.wcqknpXuif /tmp/tmp.ccsckQsSMa + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + desc 'Testing with arbiter and non-voting nodes' + set +o xtrace ----------------------------------------------------------------------------------- Testing with arbiter and non-voting nodes ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/some-name-arbiter-nv.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2009-645e0543"' + yq eval '(.spec 
| select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.rv3ExXruii ++ mktemp + local LAST_ERR=/tmp/tmp.iYrhZ67JJ2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rv3ExXruii perconaservermongodb.psmdb.percona.com/some-name configured + cat /tmp/tmp.iYrhZ67JJ2 + rm /tmp/tmp.rv3ExXruii /tmp/tmp.iYrhZ67JJ2 + return 0 + log 'check if all pods started' + set +o xtrace [2025-08-07T13:58:52+0000] check if all pods started + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TBtX9j17Xv +++ mktemp ++ local LAST_ERR=/tmp/tmp.yhicTcFBrk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TBtX9j17Xv ++ cat /tmp/tmp.yhicTcFBrk ++ rm /tmp/tmp.TBtX9j17Xv /tmp/tmp.yhicTcFBrk ++ return 0 + [[ true == \t\r\u\e ]] + wait_pod some-name-rs0-arbiter-0 + local pod=some-name-rs0-arbiter-0 + set +o xtrace waiting for pod/some-name-rs0-arbiter-0 to be ready.........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.huzrCPDXa9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xd9rVCsiCh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.huzrCPDXa9 ++ cat /tmp/tmp.Xd9rVCsiCh ++ rm /tmp/tmp.huzrCPDXa9 /tmp/tmp.Xd9rVCsiCh ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eVsGnzK2zC +++ mktemp ++ local LAST_ERR=/tmp/tmp.tzbyvzyGE2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eVsGnzK2zC ++ cat /tmp/tmp.tzbyvzyGE2 ++ rm /tmp/tmp.eVsGnzK2zC /tmp/tmp.tzbyvzyGE2 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......... 
+ wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TP0mmsvQle +++ mktemp ++ local LAST_ERR=/tmp/tmp.fY1Sy7NMhL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TP0mmsvQle ++ cat /tmp/tmp.fY1Sy7NMhL ++ rm /tmp/tmp.TP0mmsvQle /tmp/tmp.fY1Sy7NMhL ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + log 'running backups' + set +o xtrace [2025-08-07T13:59:42+0000] running backups + backup_name_minio=backup-minio-arbiter-nv + run_backup minio backup-minio-arbiter-nv + local storage=minio + local backup_name=backup-minio-arbiter-nv + local base=true + local backup_type=incremental + [[ true == \t\r\u\e ]] + backup_type=incremental-base + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-arbiter-nv/' + /usr/sbin/sed -e 's/storageName:/storageName: minio/' + yq '.spec.type="incremental-base"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.nEGMBbZuOK ++ mktemp + local LAST_ERR=/tmp/tmp.jy0aIdncUO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nEGMBbZuOK perconaservermongodbbackup.psmdb.percona.com/backup-minio-arbiter-nv created + cat /tmp/tmp.jy0aIdncUO + rm /tmp/tmp.nEGMBbZuOK /tmp/tmp.jy0aIdncUO + return 0 + wait_backup backup-minio-arbiter-nv + local backup_name=backup-minio-arbiter-nv + local target_state=ready + set +o xtrace waiting for backup-minio-arbiter-nv to reach ready state........ 
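The backup manifest gets the same templating treatment, plus one yq edit that sets the type: backups flagged as base are submitted as incremental-base (starting a new chain), the rest as plain incremental. A sketch condensed from the commands above:

    # run_backup for the arbiter/non-voting cluster, condensed from the trace.
    backup_name=backup-minio-arbiter-nv
    base=true
    backup_type=incremental
    [[ $base == true ]] && backup_type=incremental-base
    yq e2e-tests/demand-backup-incremental/conf/backup.yml \
        | sed -e "s/name:/name: ${backup_name}/" \
        | sed -e "s/storageName:/storageName: minio/" \
        | yq ".spec.type=\"${backup_type}\"" \
        | kubectl apply -f -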
+ check_backup_in_storage backup-minio-arbiter-nv minio rs0 + local backup=backup-minio-arbiter-nv + local storage_type=minio + local replset=rs0 + local file=filelist.pbm + local endpoint + case ${storage_type} in + endpoint=minio-service ++ get_backup_dest backup-minio-arbiter-nv ++ local backup_name=backup-minio-arbiter-nv ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio-arbiter-nv -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4avCgV5GUB +++ mktemp ++ local LAST_ERR=/tmp/tmp.sDLwjnrt9S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-arbiter-nv -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4avCgV5GUB ++ cat /tmp/tmp.sDLwjnrt9S ++ rm /tmp/tmp.4avCgV5GUB /tmp/tmp.sDLwjnrt9S ++ return 0 + backup_dest=operator-testing/2025-08-07T13:59:43Z + [[ minio == \m\i\n\i\o ]] + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:59:43Z/rs0/filelist.pbm + grep filelist.pbm ++ mktemp + local LAST_OUT=/tmp/tmp.gIN0CfwLyV ++ mktemp + local LAST_ERR=/tmp/tmp.QmymuroctI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2025-08-07T13:59:43Z/rs0/filelist.pbm + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gIN0CfwLyV + cat /tmp/tmp.QmymuroctI + rm /tmp/tmp.gIN0CfwLyV /tmp/tmp.QmymuroctI + return 0 2025-08-07 13:59:50 13930 filelist.pbm + run_restore backup-minio-arbiter-nv + local backup_name=backup-minio-arbiter-nv + log 'drop collection' + set +o xtrace [2025-08-07T13:59:59+0000] drop collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rJNX9OVXly +++ mktemp ++ local LAST_ERR=/tmp/tmp.5869vdGNED ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rJNX9OVXly ++ cat /tmp/tmp.5869vdGNED ++ rm /tmp/tmp.rJNX9OVXly /tmp/tmp.5869vdGNED ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ftB6QDhqIz ++ mktemp + 
local LAST_ERR=/tmp/tmp.l7X3di5YfZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ftB6QDhqIz Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-arbiter-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-nv-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-3.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("693627ca-2841-48fd-823f-46d23cfb359a") } Percona Server for MongoDB server version: v7.0.22-12 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.l7X3di5YfZ + rm /tmp/tmp.ftB6QDhqIz /tmp/tmp.l7X3di5YfZ + return 0 + log 'check backup and restore -- backup-minio-arbiter-nv' + set +o xtrace [2025-08-07T14:00:01+0000] check backup and restore -- backup-minio-arbiter-nv + yq /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-arbiter-nv/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio-arbiter-nv/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2k99AhDFkm ++ mktemp + local LAST_ERR=/tmp/tmp.ngJZWHIIuB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2k99AhDFkm perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-arbiter-nv created + cat /tmp/tmp.ngJZWHIIuB + rm /tmp/tmp.2k99AhDFkm /tmp/tmp.ngJZWHIIuB + return 0 + run_recovery_check backup-minio-arbiter-nv _restore-arbiter-nv + local backup_name=backup-minio-arbiter-nv + local compare_suffix=_restore-arbiter-nv + local base=true + wait_restore backup-minio-arbiter-nv some-name requested 0 3000 + local backup_name=backup-minio-arbiter-nv + local cluster_name=some-name + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-arbiter-nv object to be createdOK Waiting psmdb-restore/restore-backup-minio-arbiter-nv to reach state "requested" ...OK after 3 minutes + [[ 0 -eq 1 ]] + echo + compare_kubectl statefulset/some-name-rs0 _restore-arbiter-nv + local resource=statefulset/some-name-rs0 + local postfix=_restore-arbiter-nv + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml + local new_result=/tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv-oc.yml ']' + kubectl_bin get -o yaml 
statefulset/some-name-rs0
+ yq eval '
  del(.metadata.ownerReferences[].apiVersion)
  | del(.metadata.managedFields)
  | del(.. | select(has("creationTimestamp")).creationTimestamp)
  | del(.. | select(has("namespace")).namespace)
  | del(.. | select(has("uid")).uid)
  | del(.metadata.resourceVersion)
  | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE"))
  | del(.metadata.selfLink)
  | del(.metadata.annotations."cloud.google.com/neg")
  | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration")
  | del(.. | select(has("image")).image)
  | del(.. | select(has("clusterIP")).clusterIP)
  | del(.. | select(has("clusterIPs")).clusterIPs)
  | del(.. | select(has("dataSource")).dataSource)
  | del(.. | select(has("procMount")).procMount)
  | del(.. | select(has("storageClassName")).storageClassName)
  | del(.. | select(has("finalizers")).finalizers)
  | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection")
  | del(.. | select(has("volumeName")).volumeName)
  | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner")
  | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner")
  | del(.spec.volumeMode)
  | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node")
  | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash")
  | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash")
  | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash")
  | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash")
  | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem"))
  | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort)
  | del(.. | select(has("nodePort")).nodePort)
  | del(.status)
  | (.. | select(tag == "!!str")) |= sub("demand-backup-incremental-25297", "NAME_SPACE")
  | del(.spec.volumeClaimTemplates[].apiVersion)
  | del(.spec.volumeClaimTemplates[].kind)
  | del(.spec.ipFamilies)
  | del(.spec.ipFamilyPolicy)
  | (.. | select(. == "extensions/v1beta1")) = "apps/v1"
  | (.. | select(.
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.QiXapsYOQI ++ mktemp + local LAST_ERR=/tmp/tmp.62mUIh41jc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QiXapsYOQI + cat /tmp/tmp.62mUIh41jc + rm /tmp/tmp.QiXapsYOQI /tmp/tmp.62mUIh41jc + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/statefulset_some-name-rs0_restore-arbiter-nv.yml /tmp/tmp.ayVnFSAdut/statefulset_some-name-rs0.yml + wait_restore backup-minio-arbiter-nv some-name ready 0 1800 + local backup_name=backup-minio-arbiter-nv + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=1800 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-arbiter-nv object to be createdOK Waiting psmdb-restore/restore-backup-minio-arbiter-nv to reach state "ready" .OK after 1 minutes + [[ 0 -eq 1 ]] ++ kubectl_bin get psmdb some-name -o yaml ++ yq '.metadata.annotations."percona.com/resync-pbm"' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Abjq7QQvT3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.R7Qc2EDSVU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Abjq7QQvT3 ++ cat /tmp/tmp.R7Qc2EDSVU ++ rm /tmp/tmp.Abjq7QQvT3 /tmp/tmp.R7Qc2EDSVU ++ return 0 + '[' true == null ']' + echo + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n3iW3Ipz11 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GcHRGuLRne ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n3iW3Ipz11 ++ cat /tmp/tmp.GcHRGuLRne ++ rm /tmp/tmp.n3iW3Ipz11 /tmp/tmp.GcHRGuLRne ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q3XWJanU2E +++ mktemp ++ local LAST_ERR=/tmp/tmp.yf3uzkclWZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q3XWJanU2E ++ cat /tmp/tmp.yf3uzkclWZ ++ rm /tmp/tmp.q3XWJanU2E /tmp/tmp.yf3uzkclWZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2aN1ikYYOn +++ mktemp ++ local LAST_ERR=/tmp/tmp.3pPk0MxYIN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2aN1ikYYOn ++ cat /tmp/tmp.3pPk0MxYIN ++ rm /tmp/tmp.2aN1ikYYOn /tmp/tmp.3pPk0MxYIN ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qLx7yPHjlc +++ mktemp ++ local LAST_ERR=/tmp/tmp.OROaN1jdvt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qLx7yPHjlc ++ cat /tmp/tmp.OROaN1jdvt ++ rm /tmp/tmp.qLx7yPHjlc /tmp/tmp.OROaN1jdvt ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1Cgxhg9ACi +++ mktemp ++ local LAST_ERR=/tmp/tmp.azAbREFFty ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1Cgxhg9ACi ++ cat /tmp/tmp.azAbREFFty ++ rm /tmp/tmp.1Cgxhg9ACi /tmp/tmp.azAbREFFty ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KntBsf3KoA +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZC6OyiIXv4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KntBsf3KoA ++ cat /tmp/tmp.ZC6OyiIXv4 ++ rm /tmp/tmp.KntBsf3KoA /tmp/tmp.ZC6OyiIXv4 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V0k4gzBAeB +++ mktemp ++ local LAST_ERR=/tmp/tmp.mA2ljVW9D4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V0k4gzBAeB ++ cat /tmp/tmp.mA2ljVW9D4 ++ rm /tmp/tmp.V0k4gzBAeB /tmp/tmp.mA2ljVW9D4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TkEvKhfKiS +++ mktemp ++ local LAST_ERR=/tmp/tmp.nSwxpqL43E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TkEvKhfKiS ++ cat /tmp/tmp.nSwxpqL43E ++ rm /tmp/tmp.TkEvKhfKiS /tmp/tmp.nSwxpqL43E ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
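
Every kubectl call in this log runs through the kubectl_bin wrapper, whose fingerprint (mktemp, seq 0 2, set +e ... set -e, break) repeats throughout the trace above. A minimal reconstruction, with the backoff assumed to be sleep $((timeout * i)), which matches the 0s/4s/8s sleeps observed later in this log:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                      # at most three attempts
        set +e                                   # allow failure under set -e
        kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        set -e
        if [ "${exit_status}" -eq 0 ]; then
            break
        fi
        cat "${LAST_OUT}" "${LAST_ERR}"          # surface the failed attempt
        sleep $((timeout * i))                   # assumed backoff: 0s, then 4s, then 8s
    done
    cat "${LAST_OUT}" "${LAST_ERR}"
    rm -f "${LAST_OUT}" "${LAST_ERR}"
    return "${exit_status}"
}

The set +e / set -e bracket is the piece that makes retries possible at all in a script that otherwise runs under set -e.
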
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sJukSItElT +++ mktemp ++ local LAST_ERR=/tmp/tmp.SlprLbxdvc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sJukSItElT ++ cat /tmp/tmp.SlprLbxdvc ++ rm /tmp/tmp.sJukSItElT /tmp/tmp.SlprLbxdvc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.80tT0jh5lg +++ mktemp ++ local LAST_ERR=/tmp/tmp.E5eXzGCOjF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.80tT0jh5lg ++ cat /tmp/tmp.E5eXzGCOjF ++ rm /tmp/tmp.80tT0jh5lg /tmp/tmp.E5eXzGCOjF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.45swDjhQNl +++ mktemp ++ local LAST_ERR=/tmp/tmp.L4nFJMULGs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.45swDjhQNl ++ cat /tmp/tmp.L4nFJMULGs ++ rm /tmp/tmp.45swDjhQNl /tmp/tmp.L4nFJMULGs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ac6HRGH5CI +++ mktemp ++ local LAST_ERR=/tmp/tmp.NLooF5f032 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ac6HRGH5CI ++ cat /tmp/tmp.NLooF5f032 ++ rm /tmp/tmp.ac6HRGH5CI /tmp/tmp.NLooF5f032 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1LzglZZElP +++ mktemp ++ local LAST_ERR=/tmp/tmp.toc8CMu9nI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1LzglZZElP ++ cat /tmp/tmp.toc8CMu9nI ++ rm /tmp/tmp.1LzglZZElP /tmp/tmp.toc8CMu9nI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WoEaVMhBtM +++ mktemp ++ local LAST_ERR=/tmp/tmp.37n4fbiy5G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WoEaVMhBtM ++ cat /tmp/tmp.37n4fbiy5G ++ rm /tmp/tmp.WoEaVMhBtM /tmp/tmp.37n4fbiy5G ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . 
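
The dot stream above is produced by a bounded readiness poll: .status.state is read from the psmdb object every 10 seconds, transient "error" states (visible earlier in this wait) are simply polled through, and the loop gives up after 32 attempts. A sketch with those observed parameters; the script's own spelling 'readyness' is kept verbatim, and the failure message is illustrative:

wait_cluster_consistency() {
    local cluster_name="$1" wait_time=32 retry=0 state
    sleep 7                                      # initial grace period
    echo -n 'waiting for cluster readyness'
    while true; do
        state=$(kubectl get psmdb "${cluster_name}" -o 'jsonpath={.status.state}')
        if [ "${state}" = "ready" ]; then
            break
        fi
        retry=$((retry + 1))
        if [ "${retry}" -ge "${wait_time}" ]; then
            echo "cluster ${cluster_name} never reached ready" >&2
            return 1
        fi
        echo -n .
        sleep 10
    done
    echo
}
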
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.edQUlc7xSI +++ mktemp ++ local LAST_ERR=/tmp/tmp.S5y9ty2ReR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.edQUlc7xSI ++ cat /tmp/tmp.S5y9ty2ReR ++ rm /tmp/tmp.edQUlc7xSI /tmp/tmp.S5y9ty2ReR ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bz9ROUJITe +++ mktemp ++ local LAST_ERR=/tmp/tmp.4cXo0sB0lG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bz9ROUJITe ++ cat /tmp/tmp.4cXo0sB0lG ++ rm /tmp/tmp.Bz9ROUJITe /tmp/tmp.4cXo0sB0lG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pryV3ZbAkU +++ mktemp ++ local LAST_ERR=/tmp/tmp.cweGKVaCZf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pryV3ZbAkU ++ cat /tmp/tmp.cweGKVaCZf ++ rm /tmp/tmp.pryV3ZbAkU /tmp/tmp.cweGKVaCZf ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish + [[ true == true ]] + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T14:08:08+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.FuR20QeHXz +++ mktemp ++ local LAST_ERR=/tmp/tmp.LXz4XjkgPA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FuR20QeHXz ++ cat /tmp/tmp.LXz4XjkgPA ++ rm /tmp/tmp.FuR20QeHXz /tmp/tmp.LXz4XjkgPA ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.x76I75f25l ++ mktemp + local LAST_ERR=/tmp/tmp.MFazBJ2Sij + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x76I75f25l + cat /tmp/tmp.MFazBJ2Sij + rm /tmp/tmp.x76I75f25l /tmp/tmp.MFazBJ2Sij + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T14:08:09+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.YTJGFL0oCz +++ mktemp ++ local LAST_ERR=/tmp/tmp.fpUnrb2pQd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YTJGFL0oCz ++ cat /tmp/tmp.fpUnrb2pQd ++ rm /tmp/tmp.YTJGFL0oCz /tmp/tmp.fpUnrb2pQd ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kAvSuLXyta ++ mktemp + local LAST_ERR=/tmp/tmp.zHR1aG99Ge + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kAvSuLXyta + cat /tmp/tmp.zHR1aG99Ge + rm /tmp/tmp.kAvSuLXyta /tmp/tmp.zHR1aG99Ge + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local 'full_command=db.test.find()' + [[ ! 
-z '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-08-07T14:08:11+0000] running db.test.find() in myApp + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 mongodb '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.bm8kLUfoOU +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kzj9CImZsj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bm8kLUfoOU ++ cat /tmp/tmp.Kzj9CImZsj ++ rm /tmp/tmp.bm8kLUfoOU /tmp/tmp.Kzj9CImZsj ++ return 0 + local client_container=psmdb-client-b9788d8bc-s6xf5 + local mongo_flag= + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.lJi81PgysS ++ mktemp + local LAST_ERR=/tmp/tmp.mKy69QmUVC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-b9788d8bc-s6xf5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-incremental-25297.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lJi81PgysS + cat /tmp/tmp.mKy69QmUVC + rm /tmp/tmp.lJi81PgysS /tmp/tmp.mKy69QmUVC + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/e2e-tests/demand-backup-incremental/compare/find.json /tmp/tmp.ayVnFSAdut/find + destroy demand-backup-incremental-25297 + local namespace=demand-backup-incremental-25297 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ wc -l ++ kubectl_bin get psmdb-backup --no-headers +++ mktemp ++ local LAST_OUT=/tmp/tmp.pDaFrJ3yMD +++ mktemp ++ local LAST_ERR=/tmp/tmp.LzXjSnrtod ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.pDaFrJ3yMD ++ cat /tmp/tmp.LzXjSnrtod ++ rm /tmp/tmp.pDaFrJ3yMD /tmp/tmp.LzXjSnrtod ++ return 0 + '[' 6 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.J2r7WUIBnD ++ mktemp + local LAST_ERR=/tmp/tmp.MHKAopn9Xb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J2r7WUIBnD NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-aws-s3 some-name aws-s3 s3://operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:28Z incremental-base 693.52KB ready 45m 45m backup-azure-blob some-name azure-blob azure://operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:23:17Z incremental-base 809.29KB ready 44m 45m backup-gcp-cs some-name gcp-cs s3://operator-testing/psmdb-demand-backup-incremental/2025-08-07T13:22:55Z incremental-base 833.21KB ready 45m 45m backup-minio some-name minio s3://operator-testing/2025-08-07T13:23:40Z incremental-base 1.14MB ready 44m 44m backup-minio-arbiter-nv some-name minio s3://operator-testing/2025-08-07T13:59:43Z incremental-base 927.29KB ready 8m23s 8m31s backup-minio-not-base some-name minio s3://operator-testing/2025-08-07T13:24:18Z incremental 699.32KB ready 43m 43m + cat /tmp/tmp.MHKAopn9Xb + rm /tmp/tmp.J2r7WUIBnD /tmp/tmp.MHKAopn9Xb + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.KxzhYUOFcI ++ mktemp + local LAST_ERR=/tmp/tmp.5pfGIf52Gy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KxzhYUOFcI perconaservermongodbbackup.psmdb.percona.com "backup-aws-s3" deleted perconaservermongodbbackup.psmdb.percona.com "backup-azure-blob" deleted perconaservermongodbbackup.psmdb.percona.com "backup-gcp-cs" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio-arbiter-nv" deleted perconaservermongodbbackup.psmdb.percona.com "backup-minio-not-base" deleted + cat /tmp/tmp.5pfGIf52Gy + rm /tmp/tmp.KxzhYUOFcI /tmp/tmp.5pfGIf52Gy + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.K9Hl2BQHaV ++ mktemp + local LAST_ERR=/tmp/tmp.6CNQ7tTOji + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K9Hl2BQHaV customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.6CNQ7tTOji + rm /tmp/tmp.K9Hl2BQHaV /tmp/tmp.6CNQ7tTOji + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - 
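
Two notes on the CRD cleanup that starts here. First, the stray-backslash warnings come from grep -v '\-\-\-': a backslash before '-' is unnecessary in a regular expression and recent GNU grep warns about it; grep -v -e '---' filters the YAML document separators without the warning. Second, the finalizer-stripping pattern in the trace amounts to: list every instance of the custom resource, patch its finalizers to an empty list so deletion cannot hang on the already-removed operator, then wait for the CRD itself to go away. A sketch (the helper name is hypothetical; '|| :' mirrors the trace's tolerance of errors, and xargs -r is a suggested addition that would skip the spurious empty invocation seen above when no resources remain):

strip_crd_finalizers() {
    local crd="$1"
    kubectl get "${crd}" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc \
            "kubectl patch ${crd} -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
        || :
    kubectl wait --for=delete crd "${crd}"
}

strip_crd_finalizers perconaservermongodbbackups.psmdb.percona.com
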
grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WPfpdGUJPh ++ mktemp + local LAST_ERR=/tmp/tmp.kNEz9dJnto + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WPfpdGUJPh + cat /tmp/tmp.kNEz9dJnto + rm /tmp/tmp.WPfpdGUJPh /tmp/tmp.kNEz9dJnto + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.EfOtpdvop0 ++ mktemp + local LAST_ERR=/tmp/tmp.2jVToGvDdS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EfOtpdvop0 + cat /tmp/tmp.2jVToGvDdS + rm /tmp/tmp.EfOtpdvop0 /tmp/tmp.2jVToGvDdS + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-incremental-25297 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.SZs8w1GDkw ++ mktemp + local LAST_ERR=/tmp/tmp.pWxjuIc9h5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SZs8w1GDkw customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.pWxjuIc9h5 + rm /tmp/tmp.SZs8w1GDkw /tmp/tmp.pWxjuIc9h5 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.6AMFldIx0c ++ mktemp + local LAST_ERR=/tmp/tmp.D5Vo5G6GQL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2009/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6AMFldIx0c clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.D5Vo5G6GQL + rm /tmp/tmp.6AMFldIx0c /tmp/tmp.D5Vo5G6GQL + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.6Nwmu4NNZj ++ mktemp + local LAST_ERR=/tmp/tmp.3QDMkrbVik + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.6Nwmu4NNZj + cat /tmp/tmp.3QDMkrbVik Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.6Nwmu4NNZj + cat /tmp/tmp.3QDMkrbVik Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.6Nwmu4NNZj + cat /tmp/tmp.3QDMkrbVik Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.6Nwmu4NNZj + cat /tmp/tmp.3QDMkrbVik Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
"cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.6Nwmu4NNZj /tmp/tmp.3QDMkrbVik + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-incremental-25297 + rm -rf /tmp/tmp.ayVnFSAdut + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.xLJMcseAzP + local LAST_OUT=/tmp/tmp.7OuMg9P2xM ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.rJtc8JKcaM + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.9njnDIvP4G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-incremental-25297 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 
--force=true namespace psmdb-operator
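The interleaved mktemp and local lines above come from two deletions running at the same time. A rough sketch of that teardown step, assuming the suite backgrounds each namespace deletion and waits for both; the namespace names are the ones from this run and the function name is hypothetical:

delete_test_namespaces() {
    for ns in demand-backup-incremental-25297 psmdb-operator; do
        # --grace-period=0 --force=true skips the pods' graceful shutdown
        kubectl delete --grace-period=0 --force=true namespace "$ns" &
    done
    wait    # block until both background deletions return
}

Force deletion only bypasses the termination grace period; objects holding finalizers can still leave a namespace stuck in Terminating, which is why finalizers have to be cleared separately.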