Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/logs/pitr-to-new-cluster.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + source_cluster=some-name-source + target_cluster=some-name-target + backup_name_minio=backup-minio + setup_infra + create_infra pitr-to-new-cluster-13331 + local ns=pitr-to-new-cluster-13331 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.xbKMdGG12L ++ mktemp + local LAST_ERR=/tmp/tmp.zjlVyJaU8Z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xbKMdGG12L customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.zjlVyJaU8Z + rm /tmp/tmp.xbKMdGG12L /tmp/tmp.zjlVyJaU8Z + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.QMu4FWEF3h ++ mktemp + local LAST_ERR=/tmp/tmp.d49fr6GqFw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QMu4FWEF3h customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.d49fr6GqFw + rm /tmp/tmp.QMu4FWEF3h /tmp/tmp.d49fr6GqFw + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Up5bpsinRE ++ mktemp + local LAST_ERR=/tmp/tmp.1DOEs8XNtQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Up5bpsinRE + cat /tmp/tmp.1DOEs8XNtQ + rm /tmp/tmp.Up5bpsinRE /tmp/tmp.1DOEs8XNtQ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.C4nsHqMHFB ++ mktemp + local LAST_ERR=/tmp/tmp.OS0nKv32wm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C4nsHqMHFB + cat /tmp/tmp.OS0nKv32wm + rm /tmp/tmp.C4nsHqMHFB /tmp/tmp.OS0nKv32wm + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.jj0e8hhWaR ++ mktemp + local LAST_ERR=/tmp/tmp.3ZdCoQ1Ivt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jj0e8hhWaR clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.3ZdCoQ1Ivt + rm /tmp/tmp.jj0e8hhWaR /tmp/tmp.3ZdCoQ1Ivt + return 0 + check_crd_for_deletion PR-2232-bb80a94f + local git_tag=PR-2232-bb80a94f ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2232-bb80a94f/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q6zIXObkHQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.YVRMrET6Dj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.q6zIXObkHQ ++ cat 
/tmp/tmp.YVRMrET6Dj Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.q6zIXObkHQ ++ cat /tmp/tmp.YVRMrET6Dj Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.q6zIXObkHQ ++ cat /tmp/tmp.YVRMrET6Dj Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.q6zIXObkHQ ++ cat /tmp/tmp.YVRMrET6Dj Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.q6zIXObkHQ /tmp/tmp.YVRMrET6Dj ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + kubectl_bin get ns + awk '{print$1}' ++ mktemp + xargs kubectl delete ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local 
LAST_OUT=/tmp/tmp.29GbinvKiX ++ mktemp + local LAST_OUT=/tmp/tmp.R7i0eu3ILJ ++ mktemp + local LAST_ERR=/tmp/tmp.wQIQtfVQ1D + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.179xSCnxBw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R7i0eu3ILJ + cat /tmp/tmp.179xSCnxBw + rm /tmp/tmp.R7i0eu3ILJ /tmp/tmp.179xSCnxBw + return 0 namespace "pitr-to-new-cluster-12355" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.29GbinvKiX namespace "psmdb-operator" deleted + cat /tmp/tmp.wQIQtfVQ1D + rm /tmp/tmp.29GbinvKiX /tmp/tmp.wQIQtfVQ1D + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.MPY09cldXM ++ mktemp + local LAST_ERR=/tmp/tmp.L1ESEOVv0R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MPY09cldXM + cat /tmp/tmp.L1ESEOVv0R + rm /tmp/tmp.MPY09cldXM /tmp/tmp.L1ESEOVv0R + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.VtUOp5HLun ++ mktemp + local LAST_ERR=/tmp/tmp.BMy82Anxkj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VtUOp5HLun namespace/psmdb-operator created + cat /tmp/tmp.BMy82Anxkj + rm /tmp/tmp.VtUOp5HLun /tmp/tmp.BMy82Anxkj + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.ID91v5VAwS +++ mktemp ++ local LAST_ERR=/tmp/tmp.E1qxf8TLbp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ID91v5VAwS ++ cat /tmp/tmp.E1qxf8TLbp ++ rm /tmp/tmp.ID91v5VAwS /tmp/tmp.E1qxf8TLbp ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2232-bb80a94f-13-cluster14 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.qH9rS7kroJ ++ mktemp + local LAST_ERR=/tmp/tmp.tMThoMJfAI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2232-bb80a94f-13-cluster14 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qH9rS7kroJ Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2232-bb80a94f-13-cluster14" modified. 
+ cat /tmp/tmp.tMThoMJfAI + rm /tmp/tmp.qH9rS7kroJ /tmp/tmp.tMThoMJfAI + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2232-bb80a94f' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2232-bb80a94f ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.qeyFGgMcMu ++ mktemp + local LAST_ERR=/tmp/tmp.sTpuUhg1fr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qeyFGgMcMu customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.sTpuUhg1fr + rm /tmp/tmp.qeyFGgMcMu /tmp/tmp.sTpuUhg1fr + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.k68X7uipSd ++ mktemp + local LAST_ERR=/tmp/tmp.rPBWmdoekw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k68X7uipSd clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.rPBWmdoekw + rm /tmp/tmp.k68X7uipSd /tmp/tmp.rPBWmdoekw + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2232-bb80a94f") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XKS7jyV4EN ++ mktemp + local LAST_ERR=/tmp/tmp.aUPBCSRd87 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XKS7jyV4EN deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.aUPBCSRd87 + rm /tmp/tmp.XKS7jyV4EN /tmp/tmp.aUPBCSRd87 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.pzsxlaTMbr +++ mktemp ++ local LAST_ERR=/tmp/tmp.RZHKrMbQxt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pzsxlaTMbr ++ cat /tmp/tmp.RZHKrMbQxt ++ rm /tmp/tmp.pzsxlaTMbr /tmp/tmp.RZHKrMbQxt ++ return 0 + wait_operator_pod percona-server-mongodb-operator-c9c7475c6-n6zrk + local pod=percona-server-mongodb-operator-c9c7475c6-n6zrk + set +o xtrace waiting for pod/percona-server-mongodb-operator-c9c7475c6-n6zrk to be ready.OK + echo 'Print operator info from log' Print operator info from log ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator + grep 'Manager starting up' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t2RrhQAnnd +++ mktemp ++ local LAST_ERR=/tmp/tmp.BTjfhA0bHw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t2RrhQAnnd ++ cat /tmp/tmp.BTjfhA0bHw ++ rm /tmp/tmp.t2RrhQAnnd /tmp/tmp.BTjfhA0bHw ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-c9c7475c6-n6zrk ++ mktemp + local LAST_OUT=/tmp/tmp.k6KKhzOZEo ++ mktemp + local LAST_ERR=/tmp/tmp.XjFeTwivpr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-c9c7475c6-n6zrk + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k6KKhzOZEo + cat /tmp/tmp.XjFeTwivpr + rm /tmp/tmp.k6KKhzOZEo /tmp/tmp.XjFeTwivpr + return 0 2026-03-11T17:22:36.566Z INFO setup Manager starting up {"gitCommit": "bb80a94fe259eaaaf83e80d96a720e6fe7708ede", "gitBranch": "PR-2232-bb80a94f", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace pitr-to-new-cluster-13331 + local namespace=pitr-to-new-cluster-13331 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ 
awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.q47XYMJSRM ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces pitr-to-new-cluster-13331' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pitr-to-new-cluster-13331 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pitr-to-new-cluster-13331 --ignore-not-found ++ mktemp + local LAST_ERR=/tmp/tmp.fJpxLqrD4x + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.Jl3Z0Lw3CI + for i in $(seq 0 2) + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.6aALEgvnPa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace pitr-to-new-cluster-13331 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q47XYMJSRM + cat /tmp/tmp.fJpxLqrD4x + rm /tmp/tmp.q47XYMJSRM /tmp/tmp.fJpxLqrD4x + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Jl3Z0Lw3CI + cat /tmp/tmp.6aALEgvnPa + rm /tmp/tmp.Jl3Z0Lw3CI /tmp/tmp.6aALEgvnPa + return 0 + kubectl_bin wait --for=delete namespace pitr-to-new-cluster-13331 ++ mktemp + local LAST_OUT=/tmp/tmp.fmQ6O6qF1M ++ mktemp + local LAST_ERR=/tmp/tmp.UkG5bKm4UI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace pitr-to-new-cluster-13331 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fmQ6O6qF1M + cat /tmp/tmp.UkG5bKm4UI + rm /tmp/tmp.fmQ6O6qF1M /tmp/tmp.UkG5bKm4UI + return 0 + desc 'create namespace pitr-to-new-cluster-13331' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pitr-to-new-cluster-13331 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
pitr-to-new-cluster-13331 ++ mktemp + local LAST_OUT=/tmp/tmp.0AAxX1k23S ++ mktemp + local LAST_ERR=/tmp/tmp.l9n2M7jtDh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace pitr-to-new-cluster-13331 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0AAxX1k23S namespace/pitr-to-new-cluster-13331 created + cat /tmp/tmp.l9n2M7jtDh + rm /tmp/tmp.0AAxX1k23S /tmp/tmp.l9n2M7jtDh + return 0 + set_kube_ctx pitr-to-new-cluster-13331 + local namespace=pitr-to-new-cluster-13331 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.6KUKmukJek +++ mktemp ++ local LAST_ERR=/tmp/tmp.eM8KxjFR4r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6KUKmukJek ++ cat /tmp/tmp.eM8KxjFR4r ++ rm /tmp/tmp.6KUKmukJek /tmp/tmp.eM8KxjFR4r ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2232-bb80a94f-13-cluster14 --namespace=pitr-to-new-cluster-13331 ++ mktemp + local LAST_OUT=/tmp/tmp.LguCI1xQhn ++ mktemp + local LAST_ERR=/tmp/tmp.hBnVGeT4jO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2232-bb80a94f-13-cluster14 --namespace=pitr-to-new-cluster-13331 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LguCI1xQhn Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2232-bb80a94f-13-cluster14" modified. + cat /tmp/tmp.hBnVGeT4jO + rm /tmp/tmp.LguCI1xQhn /tmp/tmp.hBnVGeT4jO + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set 
mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Wed Mar 11 17:23:16 2026 NAMESPACE: pitr-to-new-cluster-13331 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.pitr-to-new-cluster-13331.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace pitr-to-new-cluster-13331 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace pitr-to-new-cluster-13331 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace pitr-to-new-cluster-13331 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace pitr-to-new-cluster-13331 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.df7foKNODs +++ mktemp ++ local LAST_ERR=/tmp/tmp.a7hlH49rBT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.df7foKNODs ++ cat /tmp/tmp.a7hlH49rBT ++ rm /tmp/tmp.df7foKNODs /tmp/tmp.a7hlH49rBT ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-2lchs + wait_pod minio-service-6d5f646cdc-2lchs + local pod=minio-service-6d5f646cdc-2lchs + set +o xtrace waiting for pod/minio-service-6d5f646cdc-2lchs to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.pitr-to-new-cluster-13331.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.rWdjN1nxAe ++ mktemp + local LAST_ERR=/tmp/tmp.tRCHfgiUSM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url 
http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rWdjN1nxAe pod "aws-cli" deleted from pitr-to-new-cluster-13331 namespace + cat /tmp/tmp.tRCHfgiUSM All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.rWdjN1nxAe /tmp/tmp.tRCHfgiUSM + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/conf/minio-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.cHUh6JFZZf ++ mktemp + local LAST_ERR=/tmp/tmp.fs9a8pFg1H + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/conf/minio-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cHUh6JFZZf secret/some-users created deployment.apps/psmdb-client created secret/minio-secret created + cat /tmp/tmp.fs9a8pFg1H + rm /tmp/tmp.cHUh6JFZZf /tmp/tmp.fs9a8pFg1H + return 0 + setup_source_cluster + desc 'create PSMDB cluster: some-name-source' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster: some-name-source ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/some-name-source.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/some-name-source.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/some-name-source.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + local LAST_OUT=/tmp/tmp.norciR2GE0 + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2232-bb80a94f"' + local LAST_ERR=/tmp/tmp.As323l3ZCL + local exit_status=0 + local timeout=4 + /usr/sbin/sed -e s/NAME_SPACE/pitr-to-new-cluster-13331/g ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.norciR2GE0 perconaservermongodb.psmdb.percona.com/some-name-source created + cat /tmp/tmp.As323l3ZCL + rm /tmp/tmp.norciR2GE0 /tmp/tmp.As323l3ZCL + return 0 + wait_for_running some-name-source-rs0 3 + local name=some-name-source-rs0 + let last_pod=2 + local 
check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name-source ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-source-rs0-0 + local pod=some-name-source-rs0-0 + set +o xtrace waiting for pod/some-name-source-rs0-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-source-rs0-1 + local pod=some-name-source-rs0-1 + set +o xtrace waiting for pod/some-name-source-rs0-1 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FKRM4HafRL +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZJH06yfSHK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FKRM4HafRL ++ cat /tmp/tmp.ZJH06yfSHK ++ rm /tmp/tmp.FKRM4HafRL /tmp/tmp.ZJH06yfSHK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-source-rs0-2 + local pod=some-name-source-rs0-2 + set +o xtrace waiting for pod/some-name-source-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dTRW7Xr1uI +++ mktemp ++ local LAST_ERR=/tmp/tmp.3YQIUUfqvu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dTRW7Xr1uI ++ cat /tmp/tmp.3YQIUUfqvu ++ rm /tmp/tmp.dTRW7Xr1uI /tmp/tmp.3YQIUUfqvu ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Zl2Jgj5YDK +++ mktemp ++ local LAST_ERR=/tmp/tmp.aweARGNsMf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-source -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Zl2Jgj5YDK ++ cat /tmp/tmp.aweARGNsMf ++ rm /tmp/tmp.Zl2Jgj5YDK /tmp/tmp.aweARGNsMf ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.............. 
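[editor's note] Nearly every kubectl call in this log is wrapped by the test suite's kubectl_bin retry helper, which is why each command in the trace is surrounded by mktemp, LAST_OUT/LAST_ERR, "seq 0 2" and sleep lines. Below is a minimal sketch of that helper reconstructed only from the trace above; the real function lives in the e2e-tests helpers of the operator repository and may differ in details (argument handling, logging), so treat names and structure here as assumptions.

# Sketch of the retry wrapper implied by the trace (reconstruction, not the actual source).
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            # On failure, show what happened and back off 0s, 4s, 8s before retrying.
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))
        else
            break
        fi
    done
    # Always replay the captured stdout/stderr of the last attempt, then clean up.
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}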
+ wait_backup_agent some-name-source-rs0-0 + local agent_pod=some-name-source-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-source-rs0-0...2026-03-11T17:25:12.000+0000 I listening for the commands + wait_backup_agent some-name-source-rs0-1 + local agent_pod=some-name-source-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-source-rs0-1...2026-03-11T17:25:47.000+0000 I listening for the commands + wait_backup_agent some-name-source-rs0-2 + local agent_pod=some-name-source-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-source-rs0-2...2026-03-11T17:25:49.000+0000 I listening for the commands + create_user some-name-source + local cluster=some-name-source + log 'create user myApp' + set +o xtrace [2026-03-11T17:25:53+0000] create user myApp + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-source-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-source-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@some-name-source-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YYYU1Jdb4w +++ mktemp ++ local LAST_ERR=/tmp/tmp.av9w4axfsG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YYYU1Jdb4w ++ cat /tmp/tmp.av9w4axfsG ++ rm /tmp/tmp.YYYU1Jdb4w /tmp/tmp.av9w4axfsG ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.C1JHMcwTX9 ++ mktemp + local LAST_ERR=/tmp/tmp.f5f7TLLGHC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C1JHMcwTX9 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("fc4f1096-99bf-42d2-8cd6-efb72935441a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.f5f7TLLGHC + rm /tmp/tmp.C1JHMcwTX9 /tmp/tmp.f5f7TLLGHC + return 0 + write_document some-name-source 100500 + local cluster=some-name-source + local x=100500 + log 'write document: 100500' + set +o xtrace 
[2026-03-11T17:25:56+0000] write document: 100500 + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1lnPkZmUbz +++ mktemp ++ local LAST_ERR=/tmp/tmp.jGsztiWDbE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1lnPkZmUbz ++ cat /tmp/tmp.jGsztiWDbE ++ rm /tmp/tmp.1lnPkZmUbz /tmp/tmp.jGsztiWDbE ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Y8xCvv2W5h ++ mktemp + local LAST_ERR=/tmp/tmp.Dv0AOfxuFK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y8xCvv2W5h Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("759d457a-3658-45af-902d-7282a01133b7") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Dv0AOfxuFK + rm /tmp/tmp.Y8xCvv2W5h /tmp/tmp.Dv0AOfxuFK + return 0 + sleep_with_log 360 'wait for PBM-1265 workaround' + local d=360 + local 'msg=wait for PBM-1265 workaround' + log 'wait for PBM-1265 workaround. sleeping for 360 seconds' + set +o xtrace [2026-03-11T17:25:58+0000] wait for PBM-1265 workaround. 
sleeping for 360 seconds + sleep 360 + compare_mongo_cmd find myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local command=find + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-11T17:31:58+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 mongodb '' '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bKwMlwnaLA +++ mktemp ++ local LAST_ERR=/tmp/tmp.lQIh5Dy9Hz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bKwMlwnaLA ++ cat /tmp/tmp.lQIh5Dy9Hz ++ rm /tmp/tmp.bKwMlwnaLA /tmp/tmp.lQIh5Dy9Hz ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.TlfBUkpcAJ ++ mktemp + local LAST_ERR=/tmp/tmp.YGvVOgAwIc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TlfBUkpcAJ + cat /tmp/tmp.YGvVOgAwIc + rm /tmp/tmp.TlfBUkpcAJ /tmp/tmp.YGvVOgAwIc + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/find.json /tmp/tmp.OiE4KhZV95/find + run_backup some-name-source backup-minio logical + local cluster=some-name-source + local name=backup-minio + local type=logical + desc 'run backup backup-minio-logical' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-logical ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-logical/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name-source/' + /usr/sbin/sed -e 's/type:/type: logical/' + kubectl_bin apply 
-f - ++ mktemp + local LAST_OUT=/tmp/tmp.tmmyrACbOg ++ mktemp + local LAST_ERR=/tmp/tmp.YsjP8cPw4S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tmmyrACbOg perconaservermongodbbackup.psmdb.percona.com/backup-minio-logical created + cat /tmp/tmp.YsjP8cPw4S + rm /tmp/tmp.tmmyrACbOg /tmp/tmp.YsjP8cPw4S + return 0 + wait_backup backup-minio-logical + local backup_name=backup-minio-logical + local target_state=ready + set +o xtrace waiting for backup-minio-logical to reach ready state.......OK + sleep_with_log 70 'wait for oplog chunks to be uploaded' + local d=70 + local 'msg=wait for oplog chunks to be uploaded' + log 'wait for oplog chunks to be uploaded. sleeping for 70 seconds' + set +o xtrace [2026-03-11T17:32:16+0000] wait for oplog chunks to be uploaded. sleeping for 70 seconds + sleep 70 + run_backup some-name-source backup-minio physical + local cluster=some-name-source + local name=backup-minio + local type=physical + desc 'run backup backup-minio-physical' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-minio-physical ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: backup-minio-physical/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name-source/' + /usr/sbin/sed -e 's/type:/type: physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Sks9aWnnHN ++ mktemp + local LAST_ERR=/tmp/tmp.ctrYY4BKRY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Sks9aWnnHN perconaservermongodbbackup.psmdb.percona.com/backup-minio-physical created + cat /tmp/tmp.ctrYY4BKRY + rm /tmp/tmp.Sks9aWnnHN /tmp/tmp.ctrYY4BKRY + return 0 + wait_backup backup-minio-physical + local backup_name=backup-minio-physical + local target_state=ready + set +o xtrace waiting for backup-minio-physical to reach ready state.......OK + desc 'write data for PiTR' + set +o xtrace ----------------------------------------------------------------------------------- write data for PiTR ----------------------------------------------------------------------------------- + write_document some-name-source 100501 + local cluster=some-name-source + local x=100501 + log 'write document: 100501' + set +o xtrace [2026-03-11T17:33:41+0000] write document: 100501 + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PoEZZOjzHo +++ mktemp ++ local LAST_ERR=/tmp/tmp.URnUSROPsO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.PoEZZOjzHo ++ cat /tmp/tmp.URnUSROPsO ++ rm /tmp/tmp.PoEZZOjzHo /tmp/tmp.URnUSROPsO ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kxUpC75elW ++ mktemp + local LAST_ERR=/tmp/tmp.x7iio1GMci + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kxUpC75elW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0af3eee8-b8ef-4679-97ed-a269d472cb17") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.x7iio1GMci + rm /tmp/tmp.kxUpC75elW /tmp/tmp.x7iio1GMci + return 0 + write_document some-name-source 100502 + local cluster=some-name-source + local x=100502 + log 'write document: 100502' + set +o xtrace [2026-03-11T17:33:43+0000] write document: 100502 + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KCqJxwkown +++ mktemp ++ local LAST_ERR=/tmp/tmp.7I98NDLPBx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KCqJxwkown ++ cat /tmp/tmp.7I98NDLPBx ++ rm /tmp/tmp.KCqJxwkown /tmp/tmp.7I98NDLPBx ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ydqtyPtCvu ++ mktemp + local LAST_ERR=/tmp/tmp.sqngRJaKok + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ydqtyPtCvu Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("5ad1058a-4e48-49a4-8be4-67c9e0c1c8c7") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.sqngRJaKok + rm /tmp/tmp.ydqtyPtCvu /tmp/tmp.sqngRJaKok + return 0 + write_document some-name-source 100503 + local cluster=some-name-source + local x=100503 + log 'write document: 100503' + set +o xtrace [2026-03-11T17:33:46+0000] write document: 100503 + run_mongo 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zwwC7TG9AO +++ mktemp ++ local LAST_ERR=/tmp/tmp.wRBR73U8WI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zwwC7TG9AO ++ cat /tmp/tmp.wRBR73U8WI ++ rm /tmp/tmp.zwwC7TG9AO /tmp/tmp.wRBR73U8WI ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ijnVStKMkx ++ mktemp + local LAST_ERR=/tmp/tmp.9IDp2K7PmF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ijnVStKMkx Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3e85b357-4c71-4801-b60f-1e2597acd3cc") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.9IDp2K7PmF + rm /tmp/tmp.ijnVStKMkx /tmp/tmp.9IDp2K7PmF + return 0 + sleep_with_log 70 'wait for oplog chunks to be uploaded' + local d=70 + local 'msg=wait for oplog chunks to be uploaded' + log 'wait for oplog chunks to be uploaded. sleeping for 70 seconds' + set +o xtrace [2026-03-11T17:33:49+0000] wait for oplog chunks to be uploaded. 
sleeping for 70 seconds + sleep 70 + compare_mongo_cmd find myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 -2nd .svc.cluster.local myApp test 'sort( { x: 1 } )' + local command=find + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local 'sort=sort( { x: 1 } )' + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n sort( { x: 1 } ) ]] + full_command='db.test.find().sort( { x: 1 } )' + log 'running db.test.find().sort( { x: 1 } ) in myApp' + set +o xtrace [2026-03-11T17:34:59+0000] running db.test.find().sort( { x: 1 } ) in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find().sort( { x: 1 } )' myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find().sort( { x: 1 } )' + local uri=myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.erm5nqqKaq +++ mktemp ++ local LAST_ERR=/tmp/tmp.4dg69ZYDdW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.erm5nqqKaq ++ cat /tmp/tmp.4dg69ZYDdW ++ rm /tmp/tmp.erm5nqqKaq /tmp/tmp.4dg69ZYDdW ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Jeo1sFiwLi ++ mktemp + local LAST_ERR=/tmp/tmp.qlATKp29Bh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-source-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Jeo1sFiwLi + cat /tmp/tmp.qlATKp29Bh + rm /tmp/tmp.Jeo1sFiwLi /tmp/tmp.qlATKp29Bh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/find-2nd.json /tmp/tmp.OiE4KhZV95/find-2nd + log 'PiTR data is ready: OK' + set +o xtrace [2026-03-11T17:35:01+0000] PiTR data is ready: OK + setup_target_cluster + desc 'create PSMDB cluster: some-name-target' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster: some-name-target ----------------------------------------------------------------------------------- + 
apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/some-name-target.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/some-name-target.yml + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/some-name-target.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + kubectl_bin apply -f - + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2232-bb80a94f"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/pitr-to-new-cluster-13331/g ++ mktemp + local LAST_OUT=/tmp/tmp.kDgq5rjY1V ++ mktemp + local LAST_ERR=/tmp/tmp.o0Ag6OCAeR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kDgq5rjY1V perconaservermongodb.psmdb.percona.com/some-name-target created + cat /tmp/tmp.o0Ag6OCAeR + rm /tmp/tmp.kDgq5rjY1V /tmp/tmp.o0Ag6OCAeR + return 0 + wait_for_running some-name-target-rs0 3 + local name=some-name-target-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name-target ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-target-rs0-0 + local pod=some-name-target-rs0-0 + set +o xtrace waiting for pod/some-name-target-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-target-rs0-1 + local pod=some-name-target-rs0-1 + set +o xtrace waiting for pod/some-name-target-rs0-1 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B0MSXTkMbU +++ mktemp ++ local LAST_ERR=/tmp/tmp.8rYCJXJMO8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B0MSXTkMbU ++ cat /tmp/tmp.8rYCJXJMO8 ++ rm /tmp/tmp.B0MSXTkMbU /tmp/tmp.8rYCJXJMO8 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-target-rs0-2 + local pod=some-name-target-rs0-2 + set +o xtrace waiting for pod/some-name-target-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NBdTqWLTNJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.1avHMbQ9NF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NBdTqWLTNJ ++ cat /tmp/tmp.1avHMbQ9NF ++ rm /tmp/tmp.NBdTqWLTNJ /tmp/tmp.1avHMbQ9NF ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.ty95Ia8u1p +++ mktemp ++ local LAST_ERR=/tmp/tmp.RdweeYY3g2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ty95Ia8u1p ++ cat /tmp/tmp.RdweeYY3g2 ++ rm /tmp/tmp.ty95Ia8u1p /tmp/tmp.RdweeYY3g2 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............... + wait_backup_agent some-name-target-rs0-0 + local agent_pod=some-name-target-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-target-rs0-0...2026-03-11T17:36:27.000+0000 I listening for the commands + wait_backup_agent some-name-target-rs0-1 + local agent_pod=some-name-target-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-target-rs0-1...2026-03-11T17:37:03.000+0000 I listening for the commands + wait_backup_agent some-name-target-rs0-2 + local agent_pod=some-name-target-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-target-rs0-2...2026-03-11T17:37:07.000+0000 I listening for the commands + wait_for_pbm_operations some-name-target + local cluster=some-name-target + set +o xtrace waiting for PBM operation to finish.OK + desc 'Case 1: Logical restore' + set +o xtrace ----------------------------------------------------------------------------------- Case 1: Logical restore ----------------------------------------------------------------------------------- + test_case_1 + run_restore some-name-target backup-minio logical + local cluster=some-name-target + local backup_name=backup-minio + local backup_type=logical ++ get_backup_dest backup-minio-logical ++ local backup_name=backup-minio-logical ++ kubectl_bin get psmdb-backup backup-minio-logical -o 'jsonpath={.status.destination}' +++ mktemp ++ sed -e 's/.json$//' ++ sed 's|s3://||' ++ sed 's|azure://||' ++ local LAST_OUT=/tmp/tmp.g7nHpOYoKq ++ sed 's|gs://||' +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ufa4XUpfOC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-logical -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g7nHpOYoKq ++ cat /tmp/tmp.Ufa4XUpfOC ++ rm /tmp/tmp.g7nHpOYoKq /tmp/tmp.Ufa4XUpfOC ++ return 0 + local destination=operator-testing/some-name-source/2026-03-11T17:32:04Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/restore-backupsource.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-logical/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name-target/' + /usr/sbin/sed -e 's/type:/type: logical/' + /usr/sbin/sed -e 's|DESTINATION|operator-testing/some-name-source/2026-03-11T17:32:04Z|' + /usr/sbin/sed -e s/pitrType:/type:/ + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.gIDm0f0fZw ++ mktemp + local LAST_ERR=/tmp/tmp.tuHXYcG4Rl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gIDm0f0fZw perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-logical created + cat /tmp/tmp.tuHXYcG4Rl + rm /tmp/tmp.gIDm0f0fZw /tmp/tmp.tuHXYcG4Rl + return 0 + wait_restore backup-minio-logical some-name-target requested 0 + local backup_name=backup-minio-logical + local 
cluster_name=some-name-target + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-logical object to be created.OK Waiting psmdb-restore/restore-backup-minio-logical to reach state "requested" .OK after 0 minutes + [[ 0 -eq 1 ]] + compare_pbm_config some-name-target source + local cluster=some-name-target + local compare_suffix=source + local container=backup-agent + local pbm_binary=pbm + [[ backup-agent == \m\o\n\g\o\d ]] + kubectl_bin exec some-name-target-rs0-0 -c backup-agent -- pbm config ++ mktemp + local LAST_OUT=/tmp/tmp.yPs6i7XSen ++ mktemp + local LAST_ERR=/tmp/tmp.pS6lyGiqTo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-target-rs0-0 -c backup-agent -- pbm config + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yPs6i7XSen + cat /tmp/tmp.pS6lyGiqTo + rm /tmp/tmp.yPs6i7XSen /tmp/tmp.pS6lyGiqTo + return 0 + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/pbm_config_source.yml /tmp/tmp.OiE4KhZV95/pbm_config.yml + log 'Backup source is set as main storage during restore: OK' + set +o xtrace [2026-03-11T17:37:23+0000] Backup source is set as main storage during restore: OK ++ date +%s + local now_ts=1773250643 + wait_restore backup-minio-logical some-name-target ready + local backup_name=backup-minio-logical + local cluster_name=some-name-target + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-logical object to be created.OK Waiting psmdb-restore/restore-backup-minio-logical to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.getPpvJ6fk +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ylq3cNYu4B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.getPpvJ6fk ++ cat /tmp/tmp.Ylq3cNYu4B ++ rm /tmp/tmp.getPpvJ6fk /tmp/tmp.Ylq3cNYu4B ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_pbm_config some-name-target target + local cluster=some-name-target + local compare_suffix=target + local container=backup-agent + local pbm_binary=pbm + [[ backup-agent == \m\o\n\g\o\d ]] + kubectl_bin exec some-name-target-rs0-0 -c backup-agent -- pbm config ++ mktemp + local LAST_OUT=/tmp/tmp.p2ZF8p1M3F ++ mktemp + local LAST_ERR=/tmp/tmp.rifyBfSHOR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-target-rs0-0 -c backup-agent -- pbm config + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p2ZF8p1M3F + cat /tmp/tmp.rifyBfSHOR + rm /tmp/tmp.p2ZF8p1M3F /tmp/tmp.rifyBfSHOR + return 0 + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/pbm_config_target.yml /tmp/tmp.OiE4KhZV95/pbm_config.yml + log 'Main storage is reverted after backup-minio-logical: OK' + set +o xtrace [2026-03-11T17:37:42+0000] Main storage is 
reverted after backup-minio-logical: OK + wait_for_pbm_operations some-name-target + local cluster=some-name-target + set +o xtrace waiting for PBM operation to finish.OK + check_if_resync_triggered some-name-target 1773250643 + local cluster=some-name-target + local ts=1773250643 + log 'checking if resync is triggered after 1773250643' + set +o xtrace [2026-03-11T17:37:44+0000] checking if resync is triggered after 1773250643 ++ run_mongo 'use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250643 }})' backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331 ++ local 'command=use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250643 }})' ++ local uri=backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local replica_set=rs0 ++ [[ backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SAe5xd7lbO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.18yl0N2GZM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.SAe5xd7lbO +++ cat /tmp/tmp.18yl0N2GZM +++ rm /tmp/tmp.SAe5xd7lbO /tmp/tmp.18yl0N2GZM +++ return 0 ++ local client_container=psmdb-client-bb8b97679-klgql ++ kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250643 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nmor13xIYb +++ mktemp ++ local LAST_ERR=/tmp/tmp.FoXgTM4TPJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250643 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.nmor13xIYb ++ cat /tmp/tmp.FoXgTM4TPJ command terminated with exit code 1 ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250643 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.nmor13xIYb ++ cat /tmp/tmp.FoXgTM4TPJ command terminated with exit code 1 ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250643 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.nmor13xIYb ++ cat /tmp/tmp.FoXgTM4TPJ command terminated with exit code 1 ++ 
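The "command terminated with exit code 1" lines here and just below come from the harness retry wrapper re-running one failing check, not from separate tests. A hedged sketch of the pattern kubectl_bin appears to follow in this log (function and variable names are placeholders): up to three attempts, stdout/stderr captured in mktemp files, with sleeps of 0, 4 and 8 seconds after failures.

kubectl_bin_sketch() {
    local out err status=0
    out=$(mktemp); err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$out" 2>"$err"
        status=$?
        set -e
        [ "$status" -eq 0 ] && break
        sleep $((i * 4))           # 0s, 4s, 8s - matches the sleeps seen around this point
    done
    cat "$out"; cat "$err" >&2     # the trace prints both tmp files after each attempt
    rm "$out" "$err"
    return "$status"
}
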
sleep 8 ++ cat /tmp/tmp.nmor13xIYb ++ cat /tmp/tmp.FoXgTM4TPJ command terminated with exit code 1 ++ rm /tmp/tmp.nmor13xIYb /tmp/tmp.FoXgTM4TPJ ++ return 1 + local 'count=Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e356d434-5471-470b-ac2d-ed8b20cf57a4") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("390af790-1df3-4902-a7a5-bee4a21d207a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye' + [[ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit 
session: session { "id" : UUID("e356d434-5471-470b-ac2d-ed8b20cf57a4") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("390af790-1df3-4902-a7a5-bee4a21d207a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye -lt 1 ]] ./e2e-tests/pitr-to-new-cluster/run: line 91: [[: Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e356d434-5471-470b-ac2d-ed8b20cf57a4") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("390af790-1df3-4902-a7a5-bee4a21d207a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye: syntax error: invalid arithmetic operator (error token is ".4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e356d434-5471-470b-ac2d-ed8b20cf57a4") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("390af790-1df3-4902-a7a5-bee4a21d207a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, 
got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c100a6e8-1349-4d19-8e1f-d5b6d1b3f6f9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye") + compare_mongo_cmd find myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 -2nd .svc.cluster.local myApp test 'sort( { x: 1 } )' + local command=find + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local 'sort=sort( { x: 1 } )' + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n sort( { x: 1 } ) ]] + full_command='db.test.find().sort( { x: 1 } )' + log 'running db.test.find().sort( { x: 1 } ) in myApp' + set +o xtrace [2026-03-11T17:38:02+0000] running db.test.find().sort( { x: 1 } ) in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find().sort( { x: 1 } )' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find().sort( { x: 1 } )' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FoSj0H9yyx +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fi4l0VVrB1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FoSj0H9yyx ++ cat /tmp/tmp.Fi4l0VVrB1 ++ rm /tmp/tmp.FoSj0H9yyx /tmp/tmp.Fi4l0VVrB1 ++ return 0 + local 
client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UCNbbfyKbv ++ mktemp + local LAST_ERR=/tmp/tmp.M6bHo8P0TZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UCNbbfyKbv + cat /tmp/tmp.M6bHo8P0TZ + rm /tmp/tmp.UCNbbfyKbv /tmp/tmp.M6bHo8P0TZ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/find-2nd.json /tmp/tmp.OiE4KhZV95/find-2nd + log 'Data is restored from backup-minio-logical: OK' + set +o xtrace [2026-03-11T17:38:05+0000] Data is restored from backup-minio-logical: OK + desc 'Case 1: Logical restore: OK' + set +o xtrace ----------------------------------------------------------------------------------- Case 1: Logical restore: OK ----------------------------------------------------------------------------------- + desc 'Case 2: Physical restore' + set +o xtrace ----------------------------------------------------------------------------------- Case 2: Physical restore ----------------------------------------------------------------------------------- + test_case_2 + run_restore some-name-target backup-minio physical + local cluster=some-name-target + local backup_name=backup-minio + local backup_type=physical ++ get_backup_dest backup-minio-physical ++ local backup_name=backup-minio-physical ++ kubectl_bin get psmdb-backup backup-minio-physical -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.EXzSs3XsdU +++ mktemp ++ local LAST_ERR=/tmp/tmp.SYzPXp7IZV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio-physical -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EXzSs3XsdU ++ cat /tmp/tmp.SYzPXp7IZV ++ rm /tmp/tmp.EXzSs3XsdU /tmp/tmp.SYzPXp7IZV ++ return 0 + local destination=operator-testing/some-name-source/2026-03-11T17:33:28Z + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/restore-backupsource.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio-physical/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name-target/' + /usr/sbin/sed -e 's/type:/type: physical/' + /usr/sbin/sed -e s/pitrType:/type:/ + /usr/sbin/sed -e 's|DESTINATION|operator-testing/some-name-source/2026-03-11T17:33:28Z|' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XSKqZQ8dFb ++ mktemp + local LAST_ERR=/tmp/tmp.MlsXdGqyOn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XSKqZQ8dFb perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio-physical created + cat /tmp/tmp.MlsXdGqyOn + rm /tmp/tmp.XSKqZQ8dFb /tmp/tmp.MlsXdGqyOn + return 0 + wait_restore 
backup-minio-physical some-name-target requested 0 + local backup_name=backup-minio-physical + local cluster_name=some-name-target + local target_state=requested + local wait_cluster_consistency=0 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-physical object to be created.OK Waiting psmdb-restore/restore-backup-minio-physical to reach state "requested" ...OK after 2 minutes + [[ 0 -eq 1 ]] + compare_pbm_config some-name-target source mongod + local cluster=some-name-target + local compare_suffix=source + local container=mongod + local pbm_binary=pbm + [[ mongod == \m\o\n\g\o\d ]] + pbm_binary=/opt/percona/pbm + kubectl_bin exec some-name-target-rs0-0 -c mongod -- /opt/percona/pbm config ++ mktemp + local LAST_OUT=/tmp/tmp.WnxlCHFqhy ++ mktemp + local LAST_ERR=/tmp/tmp.cN7XLBfbiG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-target-rs0-0 -c mongod -- /opt/percona/pbm config + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WnxlCHFqhy + cat /tmp/tmp.cN7XLBfbiG + rm /tmp/tmp.WnxlCHFqhy /tmp/tmp.cN7XLBfbiG + return 0 + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/pbm_config_source.yml /tmp/tmp.OiE4KhZV95/pbm_config.yml + log 'Backup source is set as main storage during restore: OK' + set +o xtrace [2026-03-11T17:40:29+0000] Backup source is set as main storage during restore: OK ++ date +%s + local now_ts=1773250829 + wait_restore backup-minio-physical some-name-target ready + local backup_name=backup-minio-physical + local cluster_name=some-name-target + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio-physical object to be created.OK Waiting psmdb-restore/restore-backup-minio-physical to reach state "ready" ..OK after 1 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vkCEr0I7go +++ mktemp ++ local LAST_ERR=/tmp/tmp.9fi4F99sC3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vkCEr0I7go ++ cat /tmp/tmp.9fi4F99sC3 ++ rm /tmp/tmp.vkCEr0I7go /tmp/tmp.9fi4F99sC3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bW3iqSt5Yo +++ mktemp ++ local LAST_ERR=/tmp/tmp.PyDM6ijfsl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bW3iqSt5Yo ++ cat /tmp/tmp.PyDM6ijfsl ++ rm /tmp/tmp.bW3iqSt5Yo /tmp/tmp.PyDM6ijfsl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
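The dots printed here are wait_cluster_consistency polling the custom resource while the physical restore restarts the replica set. A hedged sketch of the loop as it appears in this log (up to 32 attempts, 10 seconds apart, tolerating intermediate states such as "initializing" and the brief "error" seen a few retries later; the timeout handling below is an assumption):

retry=0
until [ "$(kubectl get psmdb some-name-target -o jsonpath='{.status.state}')" = "ready" ]; do
    retry=$((retry + 1))
    if [ "$retry" -ge 32 ]; then
        echo "some-name-target did not reach ready state" >&2
        exit 1
    fi
    echo -n .     # one dot per 10-second poll, as in the output around this point
    sleep 10
done
echo .OK
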
.+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fGgC1YiMYR +++ mktemp ++ local LAST_ERR=/tmp/tmp.W8Y47SB53c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fGgC1YiMYR ++ cat /tmp/tmp.W8Y47SB53c ++ rm /tmp/tmp.fGgC1YiMYR /tmp/tmp.W8Y47SB53c ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MzunHOll3p +++ mktemp ++ local LAST_ERR=/tmp/tmp.1pi6n6SLMu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MzunHOll3p ++ cat /tmp/tmp.1pi6n6SLMu ++ rm /tmp/tmp.MzunHOll3p /tmp/tmp.1pi6n6SLMu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QJ3keLEIJG +++ mktemp ++ local LAST_ERR=/tmp/tmp.c9a3KJQFol ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QJ3keLEIJG ++ cat /tmp/tmp.c9a3KJQFol ++ rm /tmp/tmp.QJ3keLEIJG /tmp/tmp.c9a3KJQFol ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SYXiRfvGgk +++ mktemp ++ local LAST_ERR=/tmp/tmp.KKMddKrwrJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SYXiRfvGgk ++ cat /tmp/tmp.KKMddKrwrJ ++ rm /tmp/tmp.SYXiRfvGgk /tmp/tmp.KKMddKrwrJ ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dIxNlI4mr6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hUlwm4fRCb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dIxNlI4mr6 ++ cat /tmp/tmp.hUlwm4fRCb ++ rm /tmp/tmp.dIxNlI4mr6 /tmp/tmp.hUlwm4fRCb ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KoRL2RnSrc +++ mktemp ++ local LAST_ERR=/tmp/tmp.ngByQCitI4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KoRL2RnSrc ++ cat /tmp/tmp.ngByQCitI4 ++ rm /tmp/tmp.KoRL2RnSrc /tmp/tmp.ngByQCitI4 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HeAZRw8GeR +++ mktemp ++ local LAST_ERR=/tmp/tmp.qRTxWEFI2G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HeAZRw8GeR ++ cat /tmp/tmp.qRTxWEFI2G ++ rm /tmp/tmp.HeAZRw8GeR /tmp/tmp.qRTxWEFI2G ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_pbm_config some-name-target target + local cluster=some-name-target + local compare_suffix=target + local container=backup-agent + local pbm_binary=pbm + [[ backup-agent == \m\o\n\g\o\d ]] + kubectl_bin exec some-name-target-rs0-0 -c backup-agent -- pbm config ++ mktemp + local LAST_OUT=/tmp/tmp.UVv9HddQs6 ++ mktemp + local LAST_ERR=/tmp/tmp.gdDhkaSuBj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec some-name-target-rs0-0 -c backup-agent -- pbm config + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UVv9HddQs6 + cat /tmp/tmp.gdDhkaSuBj + rm /tmp/tmp.UVv9HddQs6 /tmp/tmp.gdDhkaSuBj + return 0 + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/pbm_config_target.yml /tmp/tmp.OiE4KhZV95/pbm_config.yml + log 'Main storage is reverted after backup-minio-physical: OK' + set +o xtrace [2026-03-11T17:43:50+0000] Main storage is reverted after backup-minio-physical: OK + wait_for_pbm_operations some-name-target + local cluster=some-name-target + set +o xtrace waiting for PBM operation to finish.OK + check_if_resync_triggered some-name-target 1773250829 + local cluster=some-name-target + local ts=1773250829 + log 'checking if resync is triggered after 1773250829' + set +o xtrace [2026-03-11T17:43:51+0000] checking if resync is triggered after 1773250829 ++ run_mongo 'use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250829 }})' backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331 ++ local 'command=use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250829 }})' ++ local uri=backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local replica_set=rs0 ++ [[ backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7GUSa4cZkT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EbWFUVgYRQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ 
cat /tmp/tmp.7GUSa4cZkT +++ cat /tmp/tmp.EbWFUVgYRQ +++ rm /tmp/tmp.7GUSa4cZkT /tmp/tmp.EbWFUVgYRQ +++ return 0 ++ local client_container=psmdb-client-bb8b97679-klgql ++ kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250829 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NG9TIGDLXg +++ mktemp ++ local LAST_ERR=/tmp/tmp.ELZGuSxg0S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250829 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.NG9TIGDLXg ++ cat /tmp/tmp.ELZGuSxg0S command terminated with exit code 1 ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250829 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.NG9TIGDLXg ++ cat /tmp/tmp.ELZGuSxg0S command terminated with exit code 1 ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use admin\n db.pbmCmd.countDocuments({cmd: '\''resync'\'', ts: { '\''$gt'\'': 1773250829 }})\n'\'' | mongo mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.NG9TIGDLXg ++ cat /tmp/tmp.ELZGuSxg0S command terminated with exit code 1 ++ sleep 8 ++ cat /tmp/tmp.NG9TIGDLXg ++ cat /tmp/tmp.ELZGuSxg0S command terminated with exit code 1 ++ rm /tmp/tmp.NG9TIGDLXg /tmp/tmp.ELZGuSxg0S ++ return 1 + local 'count=Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0ce5cac9-ea9f-40e7-b70e-fed9dd233f5b") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1e98690b-245c-480f-b0c5-3accc419ae2d") } Percona Server for 
MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got '\'':'\'' : @(shell):1:45 bye' + [[ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0ce5cac9-ea9f-40e7-b70e-fed9dd233f5b") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1e98690b-245c-480f-b0c5-3accc419ae2d") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false 
Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye -lt 1 ]] ./e2e-tests/pitr-to-new-cluster/run: line 91: [[: Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0ce5cac9-ea9f-40e7-b70e-fed9dd233f5b") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1e98690b-245c-480f-b0c5-3accc419ae2d") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye: syntax error: invalid arithmetic operator (error token is ".4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0ce5cac9-ea9f-40e7-b70e-fed9dd233f5b") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1e98690b-245c-480f-b0c5-3accc419ae2d") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, got ':' : @(shell):1:45 bye Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e4fdf503-ab3d-4d6f-a030-0bc118b5a819") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db admin uncaught exception: SyntaxError: expected property name, 
got ':' : @(shell):1:45 bye") + compare_mongo_cmd find myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 -2nd .svc.cluster.local myApp test 'sort( { x: 1 } )' + local command=find + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local 'sort=sort( { x: 1 } )' + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n sort( { x: 1 } ) ]] + full_command='db.test.find().sort( { x: 1 } )' + log 'running db.test.find().sort( { x: 1 } ) in myApp' + set +o xtrace [2026-03-11T17:44:09+0000] running db.test.find().sort( { x: 1 } ) in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find().sort( { x: 1 } )' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find().sort( { x: 1 } )' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4epHK0lBfO +++ mktemp ++ local LAST_ERR=/tmp/tmp.iVSsxUetwB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4epHK0lBfO ++ cat /tmp/tmp.iVSsxUetwB ++ rm /tmp/tmp.4epHK0lBfO /tmp/tmp.iVSsxUetwB ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yyJzD8lg4N ++ mktemp + local LAST_ERR=/tmp/tmp.p3hIVAFYKK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yyJzD8lg4N + cat /tmp/tmp.p3hIVAFYKK + rm /tmp/tmp.yyJzD8lg4N /tmp/tmp.p3hIVAFYKK + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/find-2nd.json /tmp/tmp.OiE4KhZV95/find-2nd + log 'Data is restored from backup-minio-physical: OK' + set +o xtrace [2026-03-11T17:44:12+0000] Data is restored from backup-minio-physical: OK + desc 'Case 2: Physical restore: OK' + set +o xtrace ----------------------------------------------------------------------------------- Case 2: Physical restore: OK ----------------------------------------------------------------------------------- + desc 
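Both check_if_resync_triggered calls above fail the same way: the quotes meant to protect '$gt' inside the JavaScript instead close the surrounding shell quoting, so the inner bash expands $gt to an empty string and the mongo shell receives "{cmd: resync, ts: { : ... }}", hence "SyntaxError: expected property name, got ':'". The non-numeric result then also trips the numeric comparison at run:91 ("invalid arithmetic operator"). A hedged, standalone way to run the same count with the operator key intact (this is not the test's own helper; the timestamp is the example value from the second check):

# Pass the whole query as a single --eval argument so no inner shell rewrites it.
kubectl exec psmdb-client-bb8b97679-klgql -- mongo \
  "mongodb+srv://backup:backup123456@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false&replicaSet=rs0" \
  --quiet --eval 'db.getSiblingDB("admin").pbmCmd.countDocuments({ cmd: "resync", ts: { $gt: 1773250829 } })'
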
'Case 3: Backup and restore on some-name-target' + set +o xtrace ----------------------------------------------------------------------------------- Case 3: Backup and restore on some-name-target ----------------------------------------------------------------------------------- + test_case_3 + local backup_name=target-backup-minio + write_document some-name-target 100504 + local cluster=some-name-target + local x=100504 + log 'write document: 100504' + set +o xtrace [2026-03-11T17:44:12+0000] write document: 100504 + run_mongo 'use myApp\n db.test.insert({ x: 100504 })' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100504 })' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LblQnuFTZP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xns2vWXYlL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LblQnuFTZP ++ cat /tmp/tmp.Xns2vWXYlL ++ rm /tmp/tmp.LblQnuFTZP /tmp/tmp.Xns2vWXYlL ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.6IuFpcAJUy ++ mktemp + local LAST_ERR=/tmp/tmp.dtSKVHSg1i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6IuFpcAJUy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d1ec6f6f-415b-4872-9b87-2c4e9fa11e85") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.dtSKVHSg1i + rm /tmp/tmp.6IuFpcAJUy /tmp/tmp.dtSKVHSg1i + return 0 + run_backup some-name-target target-backup-minio logical + local cluster=some-name-target + local name=target-backup-minio + local type=logical + desc 'run backup target-backup-minio-logical' + set +o xtrace ----------------------------------------------------------------------------------- run backup target-backup-minio-logical ----------------------------------------------------------------------------------- + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/backup.yml + /usr/sbin/sed -e 's/name:/name: target-backup-minio-logical/' + /usr/sbin/sed -e 's/clusterName:/clusterName: some-name-target/' + /usr/sbin/sed -e 's/type:/type: logical/' + 
kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.5cbX5e8cQZ ++ mktemp + local LAST_ERR=/tmp/tmp.Tf5FExR22Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5cbX5e8cQZ perconaservermongodbbackup.psmdb.percona.com/target-backup-minio-logical created + cat /tmp/tmp.Tf5FExR22Q + rm /tmp/tmp.5cbX5e8cQZ /tmp/tmp.Tf5FExR22Q + return 0 + wait_backup target-backup-minio-logical + local backup_name=target-backup-minio-logical + local target_state=ready + set +o xtrace waiting for target-backup-minio-logical to reach ready state.......OK + log 'checking target-backup-minio-logical in storage' + set +o xtrace [2026-03-11T17:44:30+0000] checking target-backup-minio-logical in storage + check_backup_in_storage target-backup-minio-logical minio rs0 myApp.test.gz + local backup=target-backup-minio-logical + local storage_type=minio + local replset=rs0 + local file=myApp.test.gz + local protocol=http + local endpoint ++ get_backup_dest target-backup-minio-logical ++ local backup_name=target-backup-minio-logical ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup target-backup-minio-logical -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.KBXVmFJfTT +++ mktemp ++ local LAST_ERR=/tmp/tmp.UOLgRVulak ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup target-backup-minio-logical -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KBXVmFJfTT ++ cat /tmp/tmp.UOLgRVulak ++ rm /tmp/tmp.KBXVmFJfTT /tmp/tmp.UOLgRVulak ++ return 0 + backup_dest=operator-testing/some-name-target/2026-03-11T17:44:18Z + case ${storage_type} in + endpoint=minio-service + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/some-name-target/2026-03-11T17:44:18Z/rs0/myApp.test.gz + grep myApp.test.gz ++ mktemp + local LAST_OUT=/tmp/tmp.HNgEIwf7JJ ++ mktemp + local LAST_ERR=/tmp/tmp.GBU0X9FpJi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/some-name-target/2026-03-11T17:44:18Z/rs0/myApp.test.gz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HNgEIwf7JJ + cat /tmp/tmp.GBU0X9FpJi All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
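
Aside: the storage check that launched the aws-cli pod above reduces to a single s3 listing against the in-cluster MinIO endpoint. A minimal standalone sketch, using the backup name, bucket, endpoint and throwaway test credentials seen in this run (all of them are run-specific and would need adjusting elsewhere):

# Resolve the backup destination from the psmdb-backup status and strip the scheme.
backup_dest=$(kubectl get psmdb-backup target-backup-minio-logical \
  -o jsonpath='{.status.destination}' | sed 's|s3://||')

# List the expected dump object in MinIO from a throwaway aws-cli pod.
kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- \
  /usr/bin/env \
    AWS_ACCESS_KEY_ID=some-access-key \
    AWS_SECRET_ACCESS_KEY=some-secret-key \
    AWS_DEFAULT_REGION=us-east-1 \
  /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 \
    s3 ls "s3://${backup_dest}/rs0/myApp.test.gz" | grep myApp.test.gz

The grep succeeds only if the myApp.test.gz dump for replset rs0 exists under the backup's destination prefix, which is what the test reports as "uploaded to correct storage: OK".
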
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_pitr-to-new-cluster-13331 + rm /tmp/tmp.HNgEIwf7JJ /tmp/tmp.GBU0X9FpJi + return 0 2026-03-11 17:44:21 118 myApp.test.gz + log 'target-backup-minio is uploaded to correct storage: OK' + set +o xtrace [2026-03-11T17:44:35+0000] target-backup-minio is uploaded to correct storage: OK + log 'enabling PiTR on some-name-target' + set +o xtrace [2026-03-11T17:44:35+0000] enabling PiTR on some-name-target + kubectl_bin patch psmdb some-name-target --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": true}}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.yRLrP1LcY0 ++ mktemp + local LAST_ERR=/tmp/tmp.JEDTnjloBJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name-target --type=merge --patch '{"spec": {"backup": {"pitr": {"enabled": true}}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yRLrP1LcY0 perconaservermongodb.psmdb.percona.com/some-name-target patched + cat /tmp/tmp.JEDTnjloBJ + rm /tmp/tmp.yRLrP1LcY0 /tmp/tmp.JEDTnjloBJ + return 0 + write_document some-name-target 100505 + local cluster=some-name-target + local x=100505 + log 'write document: 100505' + set +o xtrace [2026-03-11T17:44:37+0000] write document: 100505 + run_mongo 'use myApp\n db.test.insert({ x: 100505 })' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100505 })' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RxixFcCbEa +++ mktemp ++ local LAST_ERR=/tmp/tmp.HyOzO9NZUZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RxixFcCbEa ++ cat /tmp/tmp.HyOzO9NZUZ ++ rm /tmp/tmp.RxixFcCbEa /tmp/tmp.HyOzO9NZUZ ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.t8djgvM57u ++ mktemp + local LAST_ERR=/tmp/tmp.ASCJ2CzlAh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t8djgvM57u Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("562576fe-2292-4225-9c07-f4fb8e330846") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: 
shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ASCJ2CzlAh + rm /tmp/tmp.t8djgvM57u /tmp/tmp.ASCJ2CzlAh + return 0 + write_document some-name-target 100506 + local cluster=some-name-target + local x=100506 + log 'write document: 100506' + set +o xtrace [2026-03-11T17:44:40+0000] write document: 100506 + run_mongo 'use myApp\n db.test.insert({ x: 100506 })' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100506 })' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F2TwLx5quC +++ mktemp ++ local LAST_ERR=/tmp/tmp.27ayAnzu5l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F2TwLx5quC ++ cat /tmp/tmp.27ayAnzu5l ++ rm /tmp/tmp.F2TwLx5quC /tmp/tmp.27ayAnzu5l ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100506 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Ppl4J07KOy ++ mktemp + local LAST_ERR=/tmp/tmp.75w5zUomdz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100506 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ppl4J07KOy Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("846c2ffe-068a-4e1e-b13f-316eb9f69515") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.75w5zUomdz + rm /tmp/tmp.Ppl4J07KOy /tmp/tmp.75w5zUomdz + return 0 + write_document some-name-target 100507 + local cluster=some-name-target + local x=100507 + log 'write document: 100507' + set +o xtrace [2026-03-11T17:44:43+0000] write document: 100507 + run_mongo 'use myApp\n db.test.insert({ x: 100507 })' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb + local 'command=use myApp\n db.test.insert({ x: 100507 })' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.91wLAxNsVE +++ mktemp ++ local LAST_ERR=/tmp/tmp.G0IJp6iHOV ++ local exit_status=0 ++ local timeout=4 
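
Aside: every write_document call in this case follows the same pattern visible in the trace: look up the psmdb-client pod, then pipe a two-line script into the legacy mongo shell over a plain (ssl=false) replica-set connection. A condensed sketch with the values from this run; only the inserted x value changes between calls:

# Find the client pod the test uses for all mongo shell access.
client_pod=$(kubectl get pods --selector=name=psmdb-client -o jsonpath='{.items[].metadata.name}')

# Insert one document into myApp.test on the target cluster (x value from this run).
kubectl exec "${client_pod}" -- bash -c \
  'printf "use myApp\n db.test.insert({ x: 100507 })\n" | mongo "mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'

The matching read path (compare_mongo_cmd) runs db.test.find().sort( { x: 1 } ) the same way, strips ObjectIds and pod ordinals with sed, and diffs the result against a stored find-*.json fixture.
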
+++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.91wLAxNsVE ++ cat /tmp/tmp.G0IJp6iHOV ++ rm /tmp/tmp.91wLAxNsVE /tmp/tmp.G0IJp6iHOV ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100507 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DKkB9lRFad ++ mktemp + local LAST_ERR=/tmp/tmp.R2N63TjnRP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100507 })\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DKkB9lRFad Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("15f5e2eb-a8e7-44d5-a378-f51ff71c87ac") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.R2N63TjnRP + rm /tmp/tmp.DKkB9lRFad /tmp/tmp.R2N63TjnRP + return 0 + sleep_with_log 3 'wait for data to be replicated' + local d=3 + local 'msg=wait for data to be replicated' + log 'wait for data to be replicated. sleeping for 3 seconds' + set +o xtrace [2026-03-11T17:44:46+0000] wait for data to be replicated. 
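
Aside: the log and sleep_with_log helpers used throughout are small wrappers whose definitions are not included in this log; a plausible reconstruction that matches the messages and timestamps above (an assumption, not the verbatim source) is:

log() {
    set +o xtrace
    # prints e.g. [2026-03-11T17:44:46+0000] <message>
    echo "[$(date -u +%Y-%m-%dT%H:%M:%S%z)] $*"
    set -o xtrace
}

sleep_with_log() {
    local d=$1
    local msg=$2
    log "${msg}. sleeping for ${d} seconds"
    sleep "${d}"
}
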
sleeping for 3 seconds + sleep 3 + compare_mongo_cmd find myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 -3rd .svc.cluster.local myApp test 'sort( { x: 1 } )' + local command=find + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local 'sort=sort( { x: 1 } )' + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n sort( { x: 1 } ) ]] + full_command='db.test.find().sort( { x: 1 } )' + log 'running db.test.find().sort( { x: 1 } ) in myApp' + set +o xtrace [2026-03-11T17:44:49+0000] running db.test.find().sort( { x: 1 } ) in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find().sort( { x: 1 } )' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb .svc.cluster.local '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find().sort( { x: 1 } )' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ipgUcOrSDl +++ mktemp ++ local LAST_ERR=/tmp/tmp.9MbF0X6itX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ipgUcOrSDl ++ cat /tmp/tmp.9MbF0X6itX ++ rm /tmp/tmp.ipgUcOrSDl /tmp/tmp.9MbF0X6itX ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0FwNw1mUHL ++ mktemp + local LAST_ERR=/tmp/tmp.pmYO7oQQAa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0FwNw1mUHL + cat /tmp/tmp.pmYO7oQQAa + rm /tmp/tmp.0FwNw1mUHL /tmp/tmp.pmYO7oQQAa + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/find-3rd.json /tmp/tmp.OiE4KhZV95/find-3rd + log 'Data is ready for PiTR: OK' + set +o xtrace [2026-03-11T17:44:52+0000] Data is ready for PiTR: OK + sleep_with_log 70 'wait for oplog chunks to be uploaded' + local d=70 + local 'msg=wait for oplog chunks to be uploaded' + log 'wait for oplog chunks to be uploaded. sleeping for 70 seconds' + set +o xtrace [2026-03-11T17:44:52+0000] wait for oplog chunks to be uploaded. 
sleeping for 70 seconds + sleep 70 +++ get_latest_restorable_time some-name-target-rs0 +++ local cluster=some-name-target-rs0 +++ local first_timestamp +++ local second_timestamp +++ local retry=0 +++ [[ '' != '' ]] ++++ kubectl_bin exec some-name-target-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.GkQv42U9z0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6KP4Q906Cl ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-target-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.GkQv42U9z0 ++++ cat /tmp/tmp.6KP4Q906Cl ++++ rm /tmp/tmp.GkQv42U9z0 /tmp/tmp.6KP4Q906Cl ++++ return 0 +++ first_timestamp=null +++ sleep 5 +++ [[ null != '' ]] +++ [[ null != \n\u\l\l ]] +++ let retry+=1 +++ [[ 1 -gt 30 ]] +++ [[ null != '' ]] +++ [[ null != \n\u\l\l ]] ++++ kubectl_bin exec some-name-target-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.k8DnITHv2o +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pRxbtjR1tz ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-target-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.k8DnITHv2o ++++ cat /tmp/tmp.pRxbtjR1tz ++++ rm /tmp/tmp.k8DnITHv2o /tmp/tmp.pRxbtjR1tz ++++ return 0 +++ first_timestamp=1773251165 +++ sleep 5 +++ [[ 1773251165 != '' ]] +++ [[ 1773251165 != \n\u\l\l ]] ++++ kubectl_bin exec some-name-target-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MNLojaKYnF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.PS2rcZ94Su ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-target-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.MNLojaKYnF ++++ cat /tmp/tmp.PS2rcZ94Su ++++ rm /tmp/tmp.MNLojaKYnF /tmp/tmp.PS2rcZ94Su ++++ return 0 +++ second_timestamp=1773251165 +++ let retry+=1 +++ [[ 2 -gt 30 ]] +++ [[ 1773251165 != '' ]] +++ [[ 1773251165 != \n\u\l\l ]] +++ [[ 1773251165 == 1773251165 ]] +++ /usr/sbin/date -u -d @1773251165 +%Y-%m-%dT%H:%M:%SZ ++ format_pitr_target 2026-03-11T17:46:05Z ++ local target=2026-03-11T17:46:05Z ++ echo 2026-03-11T17:46:05Z ++ sed 's/T/ /' ++ sed s/Z// + local 'pitrTarget=2026-03-11 17:46:05' + log 'dropping collection: myApp.test' + set +o xtrace [2026-03-11T17:46:17+0000] dropping collection: myApp.test + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8wngEhxaFn +++ mktemp ++ local LAST_ERR=/tmp/tmp.2uBQutk8ZI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8wngEhxaFn ++ cat /tmp/tmp.2uBQutk8ZI ++ rm /tmp/tmp.8wngEhxaFn /tmp/tmp.2uBQutk8ZI ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BgblSTSm3l ++ mktemp + local LAST_ERR=/tmp/tmp.D1srTe7r17 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BgblSTSm3l Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-target-rs0-1.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-0.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017,some-name-target-rs0-2.some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("605bb1fb-6af3-43a8-a7b9-d7a15d4949e0") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.D1srTe7r17 + rm /tmp/tmp.BgblSTSm3l /tmp/tmp.D1srTe7r17 + return 0 + log 'PiTR target is 2026-03-11 17:46:05' + set +o xtrace [2026-03-11T17:46:20+0000] PiTR target is 2026-03-11 17:46:05 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/conf/restore-target.yml + kubectl_bin apply -f - + /usr/sbin/sed -e 's/date:/date: 2026-03-11 17:46:05/' ++ mktemp + local LAST_OUT=/tmp/tmp.6YPNxbypcT ++ mktemp + local LAST_ERR=/tmp/tmp.nXhU49QYFr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6YPNxbypcT perconaservermongodbrestore.psmdb.percona.com/restore-target-backup-minio-logical created + cat /tmp/tmp.nXhU49QYFr + rm /tmp/tmp.6YPNxbypcT /tmp/tmp.nXhU49QYFr + return 0 + wait_restore target-backup-minio-logical some-name-target ready + local backup_name=target-backup-minio-logical + local cluster_name=some-name-target + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-target-backup-minio-logical object to be created.OK Waiting psmdb-restore/restore-target-backup-minio-logical to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name-target + local cluster_name=some-name-target + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name-target -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.84JiwADaRZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.23aa0ec9jk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name-target -o 'jsonpath={.status.state}' ++ exit_status=0 ++ 
set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.84JiwADaRZ ++ cat /tmp/tmp.23aa0ec9jk ++ rm /tmp/tmp.84JiwADaRZ /tmp/tmp.23aa0ec9jk ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 -3rd .svc.cluster.local myApp test 'sort( { x: 1 } )' + local command=find + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local 'sort=sort( { x: 1 } )' + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n sort( { x: 1 } ) ]] + full_command='db.test.find().sort( { x: 1 } )' + log 'running db.test.find().sort( { x: 1 } ) in myApp' + set +o xtrace [2026-03-11T17:46:58+0000] running db.test.find().sort( { x: 1 } ) in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find().sort( { x: 1 } )' myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find().sort( { x: 1 } )' + local uri=myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + [[ myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FvfOEGQkRt +++ mktemp ++ local LAST_ERR=/tmp/tmp.6EGBOHHVuB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FvfOEGQkRt ++ cat /tmp/tmp.6EGBOHHVuB ++ rm /tmp/tmp.FvfOEGQkRt /tmp/tmp.6EGBOHHVuB ++ return 0 + local client_container=psmdb-client-bb8b97679-klgql + kubectl_bin exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9z1Kuui5Wn ++ mktemp + local LAST_ERR=/tmp/tmp.slK56QP8LK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-klgql -- bash -c 'printf '\''use myApp\n db.test.find().sort( { x: 1 } )\n'\'' | mongo mongodb://myApp:myPass@some-name-target-rs0.pitr-to-new-cluster-13331.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9z1Kuui5Wn + cat /tmp/tmp.slK56QP8LK + rm /tmp/tmp.9z1Kuui5Wn /tmp/tmp.slK56QP8LK + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/e2e-tests/pitr-to-new-cluster/compare/find-3rd.json /tmp/tmp.OiE4KhZV95/find-3rd + log 'Data is restored from target-backup-minio-logical: OK' + set +o xtrace [2026-03-11T17:47:01+0000] Data is restored from target-backup-minio-logical: OK + desc 'Case 3: Backup and restore on some-name-target: OK' + set +o xtrace 
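
Aside: the point-in-time target used for the restore above is derived from PBM itself. The test polls pbm status inside the backup-agent container until the end timestamp of the newest oplog chunk stops changing, converts it to UTC, and substitutes it into the restore manifest. A simplified sketch of that flow with the names and paths from this run; the real helper also bounds the retries:

get_latest_restorable_time() {
    local cluster=$1
    local first second
    while true; do
        first=$(kubectl exec "${cluster}-0" -c backup-agent -- pbm status -o json \
            | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
        sleep 5
        second=$(kubectl exec "${cluster}-0" -c backup-agent -- pbm status -o json \
            | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
        # keep polling until PBM reports a chunk and its end timestamp is stable
        if [[ ${first} != "null" && ${first} == "${second}" ]]; then
            date -u -d "@${first}" +%Y-%m-%dT%H:%M:%SZ
            return
        fi
    done
}

# "2026-03-11T17:46:05Z" -> "2026-03-11 17:46:05", the format expected by the restore manifest.
pitr_target=$(get_latest_restorable_time some-name-target-rs0 | sed 's/T/ /; s/Z//')

# Template the restore manifest with the target and apply it.
sed -e "s/date:/date: ${pitr_target}/" \
    e2e-tests/pitr-to-new-cluster/conf/restore-target.yml | kubectl apply -f -

The resulting PerconaServerMongoDBRestore is then waited on with wait_restore, and wait_cluster_consistency gates the final data comparison against find-3rd.json.
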
----------------------------------------------------------------------------------- Case 3: Backup and restore on some-name-target: OK ----------------------------------------------------------------------------------- + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + destroy pitr-to-new-cluster-13331 + local namespace=pitr-to-new-cluster-13331 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.svwDI1oHVk +++ mktemp ++ local LAST_ERR=/tmp/tmp.gQK4eUyzYO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.svwDI1oHVk ++ cat /tmp/tmp.gQK4eUyzYO ++ rm /tmp/tmp.svwDI1oHVk /tmp/tmp.gQK4eUyzYO ++ return 0 + '[' 3 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.p4NmYtkuE7 ++ mktemp + local LAST_ERR=/tmp/tmp.MKiJS0Fl56 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p4NmYtkuE7 NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-minio-logical some-name-source minio s3://operator-testing/some-name-source/2026-03-11T17:32:04Z logical 64.72KB ready 14m 15m backup-minio-physical some-name-source minio s3://operator-testing/some-name-source/2026-03-11T17:33:28Z physical 644.52KB ready 13m 13m target-backup-minio-logical some-name-target minio s3://operator-testing/some-name-target/2026-03-11T17:44:18Z logical 49.53KB ready 2m39s 2m45s + cat /tmp/tmp.MKiJS0Fl56 + rm /tmp/tmp.p4NmYtkuE7 /tmp/tmp.MKiJS0Fl56 + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.98tSUApLqL ++ mktemp + local LAST_ERR=/tmp/tmp.pvzY7pEaAi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.98tSUApLqL perconaservermongodbbackup.psmdb.percona.com "backup-minio-logical" deleted from pitr-to-new-cluster-13331 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio-physical" deleted from pitr-to-new-cluster-13331 namespace perconaservermongodbbackup.psmdb.percona.com "target-backup-minio-logical" deleted from pitr-to-new-cluster-13331 namespace + cat /tmp/tmp.pvzY7pEaAi + rm /tmp/tmp.98tSUApLqL /tmp/tmp.pvzY7pEaAi + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.ykEMWpdmse ++ mktemp + local LAST_ERR=/tmp/tmp.GaWj7uJcFJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ykEMWpdmse customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.GaWj7uJcFJ + rm /tmp/tmp.ykEMWpdmse /tmp/tmp.GaWj7uJcFJ + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.S1MoPmIV4A ++ mktemp + local LAST_ERR=/tmp/tmp.sply92P6cr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S1MoPmIV4A + cat /tmp/tmp.sply92P6cr + rm /tmp/tmp.S1MoPmIV4A /tmp/tmp.sply92P6cr + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.A8KrEUI2Ag ++ mktemp + local LAST_ERR=/tmp/tmp.APCBmZAd7b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A8KrEUI2Ag + cat /tmp/tmp.APCBmZAd7b + rm /tmp/tmp.A8KrEUI2Ag /tmp/tmp.APCBmZAd7b + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch 
perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Pp5TxI0wQe ++ mktemp + local LAST_ERR=/tmp/tmp.5uVgK4pL8N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Pp5TxI0wQe + cat /tmp/tmp.5uVgK4pL8N + rm /tmp/tmp.Pp5TxI0wQe /tmp/tmp.5uVgK4pL8N + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.t3zuY0PoFB ++ mktemp + local LAST_ERR=/tmp/tmp.XHNHBy7Hhm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2232/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t3zuY0PoFB clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.XHNHBy7Hhm + rm /tmp/tmp.t3zuY0PoFB /tmp/tmp.XHNHBy7Hhm + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.JGg576y5d5 ++ mktemp + local LAST_ERR=/tmp/tmp.5WGLhhstIA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.JGg576y5d5 + cat /tmp/tmp.5WGLhhstIA Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.JGg576y5d5 + cat /tmp/tmp.5WGLhhstIA Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.JGg576y5d5 + cat /tmp/tmp.5WGLhhstIA Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found 
+ sleep 8
+ cat /tmp/tmp.JGg576y5d5
+ cat /tmp/tmp.5WGLhhstIA
+ rm /tmp/tmp.JGg576y5d5 /tmp/tmp.5WGLhhstIA
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace pitr-to-new-cluster-13331
+ rm -rf /tmp/tmp.OiE4KhZV95
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.G5VJtukROG
++ mktemp
+ local LAST_OUT=/tmp/tmp.Cw5GVZ6B9U
++ mktemp
+ local LAST_ERR=/tmp/tmp.7ovh4rqFnm
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.Hk1tvMPrt2
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace pitr-to-new-cluster-13331
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
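The teardown force-deletes both the test namespace and the operator namespace through the same retry wrapper. As a hedged sketch only (this loop is not part of the test script; the namespace names are taken from the trace above), the same cleanup can be written with an explicit wait so the next run does not race a namespace that is still terminating:

for ns in pitr-to-new-cluster-13331 psmdb-operator; do
    # Force-delete the namespace, tolerating the case where it is already gone.
    kubectl delete namespace "$ns" --grace-period=0 --force --ignore-not-found
    # Block until the namespace object actually disappears (or the timeout expires).
    kubectl wait --for=delete "namespace/$ns" --timeout=120s || true
done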