Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/logs/demand-backup-fs.log Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1 + cluster=some-name + create_infra demand-backup-fs-27812 + local ns=demand-backup-fs-27812 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.l6wiz3QUqR ++ mktemp + local LAST_ERR=/tmp/tmp.iHLwwS5vfq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l6wiz3QUqR customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.iHLwwS5vfq + rm /tmp/tmp.l6wiz3QUqR /tmp/tmp.iHLwwS5vfq + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-22322 backup-nfs-logical --type=merge -p '{"metadata":{"finalizers":[]}}' Error from server (NotFound): perconaservermongodbbackups.psmdb.percona.com "backup-nfs-logical" not found + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Uy3nrpdMdG ++ mktemp + local LAST_ERR=/tmp/tmp.oPRyOMvDwK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Uy3nrpdMdG + cat /tmp/tmp.oPRyOMvDwK + rm /tmp/tmp.Uy3nrpdMdG /tmp/tmp.oPRyOMvDwK + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: 
the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.eSdasmDkT7 ++ mktemp + local LAST_ERR=/tmp/tmp.hE7iSkCzeC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eSdasmDkT7 + cat /tmp/tmp.hE7iSkCzeC + rm /tmp/tmp.eSdasmDkT7 /tmp/tmp.hE7iSkCzeC + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-22322 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.fJTxpvpmXv ++ mktemp + local LAST_ERR=/tmp/tmp.e5iJW4ZVww + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fJTxpvpmXv customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.e5iJW4ZVww + rm /tmp/tmp.fJTxpvpmXv /tmp/tmp.e5iJW4ZVww + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.EwnatgRBFa ++ mktemp + local LAST_ERR=/tmp/tmp.xTGLDip0eA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EwnatgRBFa clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.xTGLDip0eA + rm /tmp/tmp.EwnatgRBFa /tmp/tmp.xTGLDip0eA + return 0 + check_crd_for_deletion PR-2274-acb3b334 + local git_tag=PR-2274-acb3b334 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2274-acb3b334/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/sbin/sed s/---//g + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.grFvUMFpUD +++ mktemp ++ local LAST_ERR=/tmp/tmp.duHYhi7jOZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.grFvUMFpUD ++ cat /tmp/tmp.duHYhi7jOZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ 
sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.grFvUMFpUD ++ cat /tmp/tmp.duHYhi7jOZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.grFvUMFpUD ++ cat /tmp/tmp.duHYhi7jOZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.grFvUMFpUD ++ cat /tmp/tmp.duHYhi7jOZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.grFvUMFpUD /tmp/tmp.duHYhi7jOZ ++ return 1 + [[ '' == Terminating ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + local LAST_OUT=/tmp/tmp.kxgdPIyZ0g ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.7j8nf1qebY + local LAST_OUT=/tmp/tmp.9ookeF6b0Z + local exit_status=0 + local 
timeout=4 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.RwGqA1qY1K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kxgdPIyZ0g + cat /tmp/tmp.7j8nf1qebY + rm /tmp/tmp.kxgdPIyZ0g /tmp/tmp.7j8nf1qebY + return 0 namespace "demand-backup-fs-22322" deleted namespace "storage" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9ookeF6b0Z namespace "psmdb-operator" deleted + cat /tmp/tmp.RwGqA1qY1K + rm /tmp/tmp.9ookeF6b0Z /tmp/tmp.RwGqA1qY1K + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Y5VlB4Bblx ++ mktemp + local LAST_ERR=/tmp/tmp.VDVINpCppW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y5VlB4Bblx + cat /tmp/tmp.VDVINpCppW + rm /tmp/tmp.Y5VlB4Bblx /tmp/tmp.VDVINpCppW + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.4rzhMlb8Vg ++ mktemp + local LAST_ERR=/tmp/tmp.yW4ONJw7TQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4rzhMlb8Vg namespace/psmdb-operator created + cat /tmp/tmp.yW4ONJw7TQ + rm /tmp/tmp.4rzhMlb8Vg /tmp/tmp.yW4ONJw7TQ + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.RyBiBYtmlF +++ mktemp ++ local LAST_ERR=/tmp/tmp.ePH8SIH4Xg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RyBiBYtmlF ++ cat /tmp/tmp.ePH8SIH4Xg ++ rm /tmp/tmp.RyBiBYtmlF /tmp/tmp.ePH8SIH4Xg ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster12 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fUvpBVAJCE ++ mktemp + local LAST_ERR=/tmp/tmp.GXuxFFPsTk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster12 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fUvpBVAJCE Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster12" modified. 
+ cat /tmp/tmp.GXuxFFPsTk + rm /tmp/tmp.fUvpBVAJCE /tmp/tmp.GXuxFFPsTk + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.eogVzeR8o2 ++ mktemp + local LAST_ERR=/tmp/tmp.R5rnEnvlkM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eogVzeR8o2 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.R5rnEnvlkM + rm /tmp/tmp.eogVzeR8o2 /tmp/tmp.R5rnEnvlkM + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.1SbQSGT5DE ++ mktemp + local LAST_ERR=/tmp/tmp.8DiqQYblbQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1SbQSGT5DE clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.8DiqQYblbQ + rm /tmp/tmp.1SbQSGT5DE /tmp/tmp.8DiqQYblbQ + return 0 + yq eval $'\n\t\t\t(.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334") |\n\t\t\t((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |\n\t\t\t((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.lrWUIclLSc ++ mktemp + local LAST_ERR=/tmp/tmp.comSUbmzqj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lrWUIclLSc deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.comSUbmzqj + rm /tmp/tmp.lrWUIclLSc /tmp/tmp.comSUbmzqj + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.PriH2NtRMB +++ mktemp ++ local LAST_ERR=/tmp/tmp.0LgG2WvQvJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PriH2NtRMB ++ cat /tmp/tmp.0LgG2WvQvJ ++ rm /tmp/tmp.PriH2NtRMB /tmp/tmp.0LgG2WvQvJ ++ return 0 + wait_operator_pod percona-server-mongodb-operator-578bbff7d7-v2hst + local pod=percona-server-mongodb-operator-578bbff7d7-v2hst + set +o xtrace waiting for pod/percona-server-mongodb-operator-578bbff7d7-v2hst to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.lQeNRAOBXB +++ mktemp ++ local LAST_ERR=/tmp/tmp.es1FPgKhc5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lQeNRAOBXB ++ cat /tmp/tmp.es1FPgKhc5 ++ rm /tmp/tmp.lQeNRAOBXB /tmp/tmp.es1FPgKhc5 ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-578bbff7d7-v2hst ++ mktemp + local LAST_OUT=/tmp/tmp.oIToozp8Kl ++ mktemp + local LAST_ERR=/tmp/tmp.yAWyvwZkMa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-578bbff7d7-v2hst + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oIToozp8Kl + cat /tmp/tmp.yAWyvwZkMa + rm /tmp/tmp.oIToozp8Kl /tmp/tmp.yAWyvwZkMa + return 0 2026-04-23T08:30:52.163Z INFO setup Manager starting up {"gitCommit": "acb3b334b50f3e35633eb97dbf38451e80253352", "gitBranch": "PR-2274-acb3b334", "buildTime": "", "goVersion": "go1.25.9", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-fs-27812 + local namespace=demand-backup-fs-27812 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ 
awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.uXt0SAuncQ + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-fs-27812' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-fs-27812 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-fs-27812 --ignore-not-found + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.heiwcLjxgx + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.0Y1a6DG1VX + for i in $(seq 0 2) + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.aS4QDxecH2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-fs-27812 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uXt0SAuncQ + cat /tmp/tmp.heiwcLjxgx + rm /tmp/tmp.uXt0SAuncQ /tmp/tmp.heiwcLjxgx + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0Y1a6DG1VX + cat /tmp/tmp.aS4QDxecH2 + rm /tmp/tmp.0Y1a6DG1VX /tmp/tmp.aS4QDxecH2 + return 0 + kubectl_bin wait --for=delete namespace demand-backup-fs-27812 ++ mktemp + local LAST_OUT=/tmp/tmp.6C27WKNVeQ ++ mktemp + local LAST_ERR=/tmp/tmp.tAZPDDfSXH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-fs-27812 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6C27WKNVeQ + cat /tmp/tmp.tAZPDDfSXH + rm /tmp/tmp.6C27WKNVeQ /tmp/tmp.tAZPDDfSXH + return 0 + desc 'create namespace demand-backup-fs-27812' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-fs-27812 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-fs-27812 ++ 
mktemp + local LAST_OUT=/tmp/tmp.D8bonx10QT ++ mktemp + local LAST_ERR=/tmp/tmp.zoyoe80so0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-fs-27812 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D8bonx10QT namespace/demand-backup-fs-27812 created + cat /tmp/tmp.zoyoe80so0 + rm /tmp/tmp.D8bonx10QT /tmp/tmp.zoyoe80so0 + return 0 + set_kube_ctx demand-backup-fs-27812 + local namespace=demand-backup-fs-27812 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.ABCll43TiP +++ mktemp ++ local LAST_ERR=/tmp/tmp.nOzmiCpESV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ABCll43TiP ++ cat /tmp/tmp.nOzmiCpESV ++ rm /tmp/tmp.ABCll43TiP /tmp/tmp.nOzmiCpESV ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster12 --namespace=demand-backup-fs-27812 ++ mktemp + local LAST_OUT=/tmp/tmp.GEQBvtRwAC ++ mktemp + local LAST_ERR=/tmp/tmp.xruPFDr7YT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster12 --namespace=demand-backup-fs-27812 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GEQBvtRwAC Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster12" modified. + cat /tmp/tmp.xruPFDr7YT + rm /tmp/tmp.GEQBvtRwAC /tmp/tmp.xruPFDr7YT + return 0 + kubectl_bin delete ns storage ++ mktemp + local LAST_OUT=/tmp/tmp.ftCsKgk3DK ++ mktemp + local LAST_ERR=/tmp/tmp.K7huxfPPsy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ftCsKgk3DK + cat /tmp/tmp.K7huxfPPsy Error from server (NotFound): namespaces "storage" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ftCsKgk3DK + cat /tmp/tmp.K7huxfPPsy Error from server (NotFound): namespaces "storage" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.ftCsKgk3DK + cat /tmp/tmp.K7huxfPPsy Error from server (NotFound): namespaces "storage" not found + sleep 8 + cat /tmp/tmp.ftCsKgk3DK + cat /tmp/tmp.K7huxfPPsy Error from server (NotFound): namespaces "storage" not found + rm /tmp/tmp.ftCsKgk3DK /tmp/tmp.K7huxfPPsy + return 1 + : + [[ 1 != 1 ]] + uid=1001 + [[ -n '' ]] + log 'deploying NFS server' + set +o xtrace [2026-04-23T08:31:43+0000] deploying NFS server + deploy_nfs_server 1001 + local uid=1001 + kubectl_bin create namespace storage ++ mktemp + local LAST_OUT=/tmp/tmp.ixkdUTd1Gz ++ mktemp + local LAST_ERR=/tmp/tmp.gRxa3CcAWq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace storage + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ixkdUTd1Gz namespace/storage created + cat /tmp/tmp.gRxa3CcAWq + rm /tmp/tmp.ixkdUTd1Gz /tmp/tmp.gRxa3CcAWq + return 0 + kubectl_bin apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/nfs-server.yml ++ mktemp + local LAST_OUT=/tmp/tmp.7npsRb5W5M ++ mktemp + local LAST_ERR=/tmp/tmp.Ytx9RKPmlw + 
local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/nfs-server.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7npsRb5W5M serviceaccount/nfs-server created rolebinding.rbac.authorization.k8s.io/system:openshift:scc:privileged created persistentvolumeclaim/nfs-pvc created deployment.apps/nfs-server created service/nfs-service created + cat /tmp/tmp.Ytx9RKPmlw + rm /tmp/tmp.7npsRb5W5M /tmp/tmp.Ytx9RKPmlw + return 0 + sleep 5 ++ kubectl_bin get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XDEx322Pjt +++ mktemp ++ local LAST_ERR=/tmp/tmp.laKX08glwm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XDEx322Pjt ++ cat /tmp/tmp.laKX08glwm ++ rm /tmp/tmp.XDEx322Pjt /tmp/tmp.laKX08glwm ++ return 0 + local nfsPod=nfs-server-7654469b4d-fqxsw ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ez864MbXdl +++ mktemp ++ local LAST_ERR=/tmp/tmp.1PVS37H3e2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ez864MbXdl ++ cat /tmp/tmp.1PVS37H3e2 ++ rm /tmp/tmp.ez864MbXdl /tmp/tmp.1PVS37H3e2 ++ return 0 + [[ Pending == Running ]] + log 'Waiting for nfs-server-7654469b4d-fqxsw to start Running' + set +o xtrace [2026-04-23T08:31:55+0000] Waiting for nfs-server-7654469b4d-fqxsw to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KhjHxw4P1R +++ mktemp ++ local LAST_ERR=/tmp/tmp.cXolxUdpuI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KhjHxw4P1R ++ cat /tmp/tmp.cXolxUdpuI ++ rm /tmp/tmp.KhjHxw4P1R /tmp/tmp.cXolxUdpuI ++ return 0 + [[ Pending == Running ]] + log 'Waiting for nfs-server-7654469b4d-fqxsw to start Running' + set +o xtrace [2026-04-23T08:31:57+0000] Waiting for nfs-server-7654469b4d-fqxsw to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mByJj9XYbT +++ mktemp ++ local LAST_ERR=/tmp/tmp.gZV8TMJcAz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mByJj9XYbT ++ cat /tmp/tmp.gZV8TMJcAz ++ rm /tmp/tmp.mByJj9XYbT /tmp/tmp.gZV8TMJcAz ++ return 0 + [[ Pending == Running ]] + log 'Waiting for nfs-server-7654469b4d-fqxsw to start Running' + set +o xtrace [2026-04-23T08:31:59+0000] Waiting for nfs-server-7654469b4d-fqxsw to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mTamr0rfIY 
+++ mktemp ++ local LAST_ERR=/tmp/tmp.B2EdtQ2xMv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mTamr0rfIY ++ cat /tmp/tmp.B2EdtQ2xMv ++ rm /tmp/tmp.mTamr0rfIY /tmp/tmp.B2EdtQ2xMv ++ return 0 + [[ Pending == Running ]] + log 'Waiting for nfs-server-7654469b4d-fqxsw to start Running' + set +o xtrace [2026-04-23T08:32:01+0000] Waiting for nfs-server-7654469b4d-fqxsw to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QN2nDkxfKm +++ mktemp ++ local LAST_ERR=/tmp/tmp.yCYDl5Z2TZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QN2nDkxfKm ++ cat /tmp/tmp.yCYDl5Z2TZ ++ rm /tmp/tmp.QN2nDkxfKm /tmp/tmp.yCYDl5Z2TZ ++ return 0 + [[ Pending == Running ]] + log 'Waiting for nfs-server-7654469b4d-fqxsw to start Running' + set +o xtrace [2026-04-23T08:32:03+0000] Waiting for nfs-server-7654469b4d-fqxsw to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sxDIBGAYfZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.zy0dNJ6Vbf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sxDIBGAYfZ ++ cat /tmp/tmp.zy0dNJ6Vbf ++ rm /tmp/tmp.sxDIBGAYfZ /tmp/tmp.zy0dNJ6Vbf ++ return 0 + [[ Pending == Running ]] + log 'Waiting for nfs-server-7654469b4d-fqxsw to start Running' + set +o xtrace [2026-04-23T08:32:05+0000] Waiting for nfs-server-7654469b4d-fqxsw to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UrkceOmFjC +++ mktemp ++ local LAST_ERR=/tmp/tmp.LadqtxpS5n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UrkceOmFjC ++ cat /tmp/tmp.LadqtxpS5n ++ rm /tmp/tmp.UrkceOmFjC /tmp/tmp.LadqtxpS5n ++ return 0 + [[ Pending == Running ]] + log 'Waiting for nfs-server-7654469b4d-fqxsw to start Running' + set +o xtrace [2026-04-23T08:32:07+0000] Waiting for nfs-server-7654469b4d-fqxsw to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oWL3yBQJd4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5GyWJCzOEZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-fqxsw -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oWL3yBQJd4 ++ cat /tmp/tmp.5GyWJCzOEZ ++ rm /tmp/tmp.oWL3yBQJd4 /tmp/tmp.5GyWJCzOEZ ++ return 0 + [[ Running == Running ]] + kubectl_bin exec -n storage nfs-server-7654469b4d-fqxsw -- mkdir /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.Nf2MaUnQxB ++ mktemp + 
local LAST_ERR=/tmp/tmp.fGFUKsF9rV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-7654469b4d-fqxsw -- mkdir /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Nf2MaUnQxB + cat /tmp/tmp.fGFUKsF9rV + rm /tmp/tmp.Nf2MaUnQxB /tmp/tmp.fGFUKsF9rV + return 0 + kubectl_bin exec -n storage nfs-server-7654469b4d-fqxsw -- chown 1001:1001 /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.qq1zlZziS4 ++ mktemp + local LAST_ERR=/tmp/tmp.oRclzt5VPn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-7654469b4d-fqxsw -- chown 1001:1001 /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qq1zlZziS4 + cat /tmp/tmp.oRclzt5VPn + rm /tmp/tmp.qq1zlZziS4 /tmp/tmp.oRclzt5VPn + return 0 + log 'creating secrets and start client' + set +o xtrace [2026-04-23T08:32:12+0000] creating secrets and start client + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.kJDpiMpVwM ++ mktemp + local LAST_ERR=/tmp/tmp.qmyxgecQyx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kJDpiMpVwM secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.qmyxgecQyx + rm /tmp/tmp.kJDpiMpVwM /tmp/tmp.qmyxgecQyx + return 0 + [[ -n '' ]] + log 'creating PSMDB cluster some-name' + set +o xtrace [2026-04-23T08:32:14+0000] creating PSMDB cluster some-name + [[ 1 != 1 ]] + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/demand-backup-fs-27812/g + local LAST_OUT=/tmp/tmp.PSMkXmykeu + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.tRZmeE4AdK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PSMkXmykeu perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.tRZmeE4AdK + rm /tmp/tmp.PSMkXmykeu /tmp/tmp.tRZmeE4AdK + return 0 + log 'wait for all 3 pods to start' + set +o xtrace [2026-04-23T08:32:17+0000] wait for all 3 pods to start + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local 
check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready...........................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready................OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0ELXzyHezM +++ mktemp ++ local LAST_ERR=/tmp/tmp.dHVd7YnGWI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0ELXzyHezM ++ cat /tmp/tmp.dHVd7YnGWI ++ rm /tmp/tmp.0ELXzyHezM /tmp/tmp.dHVd7YnGWI ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YnKUwHN4hi +++ mktemp ++ local LAST_ERR=/tmp/tmp.AIWcQ2GtgO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YnKUwHN4hi ++ cat /tmp/tmp.AIWcQ2GtgO ++ rm /tmp/tmp.YnKUwHN4hi /tmp/tmp.AIWcQ2GtgO ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yizY5zftlZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.nbaRn3kZ0N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yizY5zftlZ ++ cat /tmp/tmp.nbaRn3kZ0N ++ rm /tmp/tmp.yizY5zftlZ /tmp/tmp.nbaRn3kZ0N ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness........................ + [[ 1 == 1 ]] + log 'checking if statefulset created with expected config' + set +o xtrace [2026-04-23T08:35:17+0000] checking if statefulset created with expected config + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.EuB5ISTCpj/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. 
| select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("demand-backup-fs-27812", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.tqJ5djFUgX ++ mktemp + local LAST_ERR=/tmp/tmp.3E2XS9bIle + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tqJ5djFUgX + cat /tmp/tmp.3E2XS9bIle + rm /tmp/tmp.tqJ5djFUgX /tmp/tmp.3E2XS9bIle + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.EuB5ISTCpj/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.EuB5ISTCpj/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.EuB5ISTCpj/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml /tmp/tmp.EuB5ISTCpj/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2026-04-23T08:35:18+0000] compare_kubectl: statefulset/some-name-rs0 OK + log 'creating user' + set +o xtrace [2026-04-23T08:35:18+0000] creating user + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-27812 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JY8SoWWcRP +++ mktemp ++ local LAST_ERR=/tmp/tmp.42XiWsnXvP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JY8SoWWcRP ++ cat /tmp/tmp.42XiWsnXvP ++ rm /tmp/tmp.JY8SoWWcRP /tmp/tmp.42XiWsnXvP ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.iD18s1D0kK ++ mktemp + local LAST_ERR=/tmp/tmp.oc32kGEUuK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iD18s1D0kK Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("506058a2-439a-4762-a75c-da25816b4b2e") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.oc32kGEUuK + rm /tmp/tmp.iD18s1D0kK /tmp/tmp.oc32kGEUuK + return 0 + sleep 2 + log 'write initial data' + set +o xtrace [2026-04-23T08:35:23+0000] write initial data + write_data 100500 '' + local x=100500 + local find_prefix= + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N17qOiNdqS +++ mktemp ++ local LAST_ERR=/tmp/tmp.LfUZYRPMsU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N17qOiNdqS ++ cat /tmp/tmp.LfUZYRPMsU ++ rm /tmp/tmp.N17qOiNdqS /tmp/tmp.LfUZYRPMsU ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qkWqS4tSqq ++ mktemp + local LAST_ERR=/tmp/tmp.LnZbqmUhjR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qkWqS4tSqq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("201bd67b-f162-456a-bcd2-1bfced4b2090") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.LnZbqmUhjR + rm /tmp/tmp.qkWqS4tSqq /tmp/tmp.LnZbqmUhjR + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix= + local suffix=.svc.cluster.local + local 
database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:35:26+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pDYrJW2zWz +++ mktemp ++ local LAST_ERR=/tmp/tmp.VpLQr0OZAk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pDYrJW2zWz ++ cat /tmp/tmp.VpLQr0OZAk ++ rm /tmp/tmp.pDYrJW2zWz /tmp/tmp.VpLQr0OZAk ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.R7bXMP6f8M ++ mktemp + local LAST_ERR=/tmp/tmp.16Gx2oa48S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R7bXMP6f8M + cat /tmp/tmp.16Gx2oa48S + rm /tmp/tmp.R7bXMP6f8M /tmp/tmp.16Gx2oa48S + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.EuB5ISTCpj/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-04-23T08:34:43.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-04-23T08:35:15.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-04-23T08:35:17.000+0000 I listening for the commands + desc 'CASE 1: Logical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: Logical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical + run_backup nfs backup-nfs-logical logical + local storage=nfs + local 
backup_name=backup-nfs-logical + local type=logical + log 'running backup backup-nfs-logical' + set +o xtrace [2026-04-23T08:35:33+0000] running backup backup-nfs-logical + yq eval $'.metadata.name = "backup-nfs-logical"\n\t\t\t| .spec.storageName = "nfs"\n\t\t\t| .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.543moJhnCw ++ mktemp + local LAST_ERR=/tmp/tmp.cNaaNjITsR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.543moJhnCw perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical created + cat /tmp/tmp.cNaaNjITsR + rm /tmp/tmp.543moJhnCw /tmp/tmp.cNaaNjITsR + return 0 + wait_backup backup-nfs-logical + local backup_name=backup-nfs-logical + local target_state=ready + set +o xtrace waiting for backup-nfs-logical to reach ready state.......OK + run_recovery_check backup-nfs-logical some-name -2nd '' + local backup=backup-nfs-logical + local cluster=some-name + local find_prefix_before=-2nd + local find_prefix_after= + write_data 100501 -2nd + local x=100501 + local find_prefix=-2nd + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rCRBnpav65 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QkCrQOu9jQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rCRBnpav65 ++ cat /tmp/tmp.QkCrQOu9jQ ++ rm /tmp/tmp.rCRBnpav65 /tmp/tmp.QkCrQOu9jQ ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cbVTYv1GKx ++ mktemp + local LAST_ERR=/tmp/tmp.7Jx9M3hv55 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cbVTYv1GKx Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b9ae3754-e4ea-4e11-9f78-6406c0d362ec") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and 
server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.7Jx9M3hv55 + rm /tmp/tmp.cbVTYv1GKx /tmp/tmp.7Jx9M3hv55 + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:35:51+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' +++ mktemp + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.FCGaJggXFB +++ mktemp ++ local LAST_ERR=/tmp/tmp.RUTvxHNhdk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FCGaJggXFB ++ cat /tmp/tmp.RUTvxHNhdk ++ rm /tmp/tmp.FCGaJggXFB /tmp/tmp.RUTvxHNhdk ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GOhYbkLq8Z ++ mktemp + local LAST_ERR=/tmp/tmp.9nybSJeNOR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GOhYbkLq8Z + cat /tmp/tmp.9nybSJeNOR + rm /tmp/tmp.GOhYbkLq8Z /tmp/tmp.9nybSJeNOR + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find-2nd.json /tmp/tmp.EuB5ISTCpj/find-2nd + run_restore backup-nfs-logical + local backup_name=backup-nfs-logical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/restore.yml + log 'running restore restore-backup-nfs-logical' + set +o xtrace [2026-04-23T08:35:54+0000] running restore restore-backup-nfs-logical + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-logical/' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/backupName:/backupName: 
backup-nfs-logical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.3TaDtdSnWD ++ mktemp + local LAST_ERR=/tmp/tmp.wjIKMfBU6r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3TaDtdSnWD perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical created + cat /tmp/tmp.wjIKMfBU6r + rm /tmp/tmp.3TaDtdSnWD /tmp/tmp.wjIKMfBU6r + return 0 + wait_restore backup-nfs-logical some-name + local backup_name=backup-nfs-logical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical object to be created.OK Waiting psmdb-restore/restore-backup-nfs-logical to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wCWqoTwshF +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kcs4PgGEfs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wCWqoTwshF ++ cat /tmp/tmp.Kcs4PgGEfs ++ rm /tmp/tmp.wCWqoTwshF /tmp/tmp.Kcs4PgGEfs ++ return 0 + [[ ready == ready ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:36:16+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iqCINQjMMW +++ mktemp ++ local LAST_ERR=/tmp/tmp.9XXhkt8oLV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iqCINQjMMW ++ cat /tmp/tmp.9XXhkt8oLV ++ rm /tmp/tmp.iqCINQjMMW /tmp/tmp.9XXhkt8oLV ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec 
psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vz1bCyHff3 ++ mktemp + local LAST_ERR=/tmp/tmp.zBLp8Gh9au + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vz1bCyHff3 + cat /tmp/tmp.zBLp8Gh9au + rm /tmp/tmp.vz1bCyHff3 /tmp/tmp.zBLp8Gh9au + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.EuB5ISTCpj/find + desc 'CASE 2: Logical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: Logical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical-pitr + run_backup nfs backup-nfs-logical-pitr logical + local storage=nfs + local backup_name=backup-nfs-logical-pitr + local type=logical + log 'running backup backup-nfs-logical-pitr' + set +o xtrace [2026-04-23T08:36:19+0000] running backup backup-nfs-logical-pitr + yq eval $'.metadata.name = "backup-nfs-logical-pitr"\n\t\t\t| .spec.storageName = "nfs"\n\t\t\t| .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Bk52kgG1a2 ++ mktemp + local LAST_ERR=/tmp/tmp.poOHTUaYKU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bk52kgG1a2 perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr created + cat /tmp/tmp.poOHTUaYKU + rm /tmp/tmp.Bk52kgG1a2 /tmp/tmp.poOHTUaYKU + return 0 + wait_backup backup-nfs-logical-pitr + local backup_name=backup-nfs-logical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-logical-pitr to reach ready state.......OK + write_data 100502 -3rd + local x=100502 + local find_prefix=-3rd + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OO4mlr2kli +++ mktemp ++ local LAST_ERR=/tmp/tmp.6dpzFSt4sY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OO4mlr2kli ++ cat /tmp/tmp.6dpzFSt4sY ++ rm /tmp/tmp.OO4mlr2kli /tmp/tmp.6dpzFSt4sY ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.xMReC2RdP2 ++ mktemp + local LAST_ERR=/tmp/tmp.aj8o016WTH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xMReC2RdP2 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ad96be55-1528-459e-914e-47fa809d1f41") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.aj8o016WTH + rm /tmp/tmp.xMReC2RdP2 /tmp/tmp.aj8o016WTH + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:36:37+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.urNSb1l4nc +++ mktemp ++ local LAST_ERR=/tmp/tmp.z9vdYwAP7g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.urNSb1l4nc ++ cat /tmp/tmp.z9vdYwAP7g ++ rm /tmp/tmp.urNSb1l4nc /tmp/tmp.z9vdYwAP7g ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.poqTL5uQ2b ++ mktemp + local LAST_ERR=/tmp/tmp.uxXKazQAuy + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.poqTL5uQ2b + cat /tmp/tmp.uxXKazQAuy + rm /tmp/tmp.poqTL5uQ2b /tmp/tmp.uxXKazQAuy + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.EuB5ISTCpj/find-3rd + run_pitr_check backup-nfs-logical-pitr some-name -3rd + local backup=backup-nfs-logical-pitr + local cluster=some-name + local find_prefix=-3rd + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aNbzirIfxl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vh2vQPSaOV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aNbzirIfxl ++ cat /tmp/tmp.Vh2vQPSaOV ++ rm /tmp/tmp.aNbzirIfxl /tmp/tmp.Vh2vQPSaOV ++ return 0 + local backup_last_write=1776933387 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GUhJ8MsGZ7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wZ5ySZlHsv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GUhJ8MsGZ7 +++ cat /tmp/tmp.wZ5ySZlHsv +++ rm /tmp/tmp.GUhJ8MsGZ7 /tmp/tmp.wZ5ySZlHsv +++ return 0 ++ echo 1776933383 + local last_chunk=1776933383 + [[ 1776933383 -gt 1776933387 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FKnrUz4dN2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eUbqL4hXV8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FKnrUz4dN2 +++ cat /tmp/tmp.eUbqL4hXV8 +++ rm /tmp/tmp.FKnrUz4dN2 /tmp/tmp.eUbqL4hXV8 +++ return 0 ++ echo 1776933383 + last_chunk=1776933383 + retries=1 ++ format_date 1776933383 ++ local timestamp=1776933383 +++ TZ=UTC +++ /usr/sbin/date -d@1776933383 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:23 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 + log 'Waiting for last oplog chunk (2026-04-23 08:36:23) to be greater than last write (2026-04-23 08:36:27)' + set +o xtrace [2026-04-23T08:36:45+0000] Waiting for last oplog chunk (2026-04-23 08:36:23) to be greater than last write (2026-04-23 08:36:27) + sleep 10 + [[ 1776933383 -gt 1776933387 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local 
cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NgBOQTChEs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XAVnPPht5j +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NgBOQTChEs +++ cat /tmp/tmp.XAVnPPht5j +++ rm /tmp/tmp.NgBOQTChEs /tmp/tmp.XAVnPPht5j +++ return 0 ++ echo 1776933387 + last_chunk=1776933387 + retries=2 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 + log 'Waiting for last oplog chunk (2026-04-23 08:36:27) to be greater than last write (2026-04-23 08:36:27)' + set +o xtrace [2026-04-23T08:36:57+0000] Waiting for last oplog chunk (2026-04-23 08:36:27) to be greater than last write (2026-04-23 08:36:27) + sleep 10 + [[ 1776933387 -gt 1776933387 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7404ZVDVPj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Eybm71a2pl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7404ZVDVPj +++ cat /tmp/tmp.Eybm71a2pl +++ rm /tmp/tmp.7404ZVDVPj /tmp/tmp.Eybm71a2pl +++ return 0 ++ echo 1776933387 + last_chunk=1776933387 + retries=3 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 + log 'Waiting for last oplog chunk (2026-04-23 08:36:27) to be greater than last write (2026-04-23 08:36:27)' + set +o xtrace [2026-04-23T08:37:09+0000] Waiting for last oplog chunk (2026-04-23 08:36:27) to be greater than last write (2026-04-23 08:36:27) + sleep 10 + [[ 1776933387 -gt 1776933387 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ku9JPvnOkF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0RHdU3h0EK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ku9JPvnOkF +++ cat /tmp/tmp.0RHdU3h0EK +++ rm /tmp/tmp.ku9JPvnOkF /tmp/tmp.0RHdU3h0EK +++ return 0 ++ echo 1776933387 + last_chunk=1776933387 + retries=4 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 
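# [editor's note] The retry loop traced here compares the backup's last covered write
# (.backups.snapshot[0].restoreTo from `pbm status -o json`) against the newest oplog
# chunk (.backups.pitrChunks.pitrChunks | last | .range.end), sleeping 10s between polls
# and giving up after 30 retries. A minimal standalone sketch of that logic; the pod and
# container names, jq paths, and the 30/10 limits come from this trace, everything else
# (function bodies, error handling) is an assumption, not the test's actual source:
format_date() { TZ=UTC date -d "@$1" '+%Y-%m-%d %H:%M:%S'; }
wait_for_oplogs() {
    local pod=some-name-rs0-0
    local last_write last_chunk retries=0
    # unix timestamp of the last write covered by the newest snapshot
    last_write=$(kubectl exec "$pod" -c backup-agent -- pbm status -o json \
        | jq '.backups.snapshot[0].restoreTo')
    # poll until PBM has uploaded an oplog chunk newer than that write
    while [[ ${last_chunk:-0} -le $last_write ]]; do
        if [[ $retries -gt 30 ]]; then
            echo "oplog chunks never passed the last write" >&2
            return 1
        fi
        last_chunk=$(kubectl exec "$pod" -c backup-agent -- pbm status -o json \
            | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
        retries=$((retries + 1))
        echo "last chunk: $(format_date "$last_chunk"), last write: $(format_date "$last_write")"
        sleep 10
    done
}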
'+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 + log 'Waiting for last oplog chunk (2026-04-23 08:36:27) to be greater than last write (2026-04-23 08:36:27)' + set +o xtrace [2026-04-23T08:37:21+0000] Waiting for last oplog chunk (2026-04-23 08:36:27) to be greater than last write (2026-04-23 08:36:27) + sleep 10 + [[ 1776933387 -gt 1776933387 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CdfDLf3KMH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4yaqVjsqEB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CdfDLf3KMH +++ cat /tmp/tmp.4yaqVjsqEB +++ rm /tmp/tmp.CdfDLf3KMH /tmp/tmp.4yaqVjsqEB +++ return 0 ++ echo 1776933444 + last_chunk=1776933444 + retries=5 ++ format_date 1776933444 ++ local timestamp=1776933444 +++ TZ=UTC +++ /usr/sbin/date -d@1776933444 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:37:24 ++ format_date 1776933387 ++ local timestamp=1776933387 +++ TZ=UTC +++ /usr/sbin/date -d@1776933387 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:36:27 + log 'Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:36:27)' + set +o xtrace [2026-04-23T08:37:33+0000] Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:36:27) + sleep 10 + [[ 1776933444 -gt 1776933387 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VIg93D0XAf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.DjrOvCgU6g ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.VIg93D0XAf ++++ cat /tmp/tmp.DjrOvCgU6g ++++ rm /tmp/tmp.VIg93D0XAf /tmp/tmp.DjrOvCgU6g ++++ return 0 +++ echo 1776933444 ++ format_date 1776933444 ++ local timestamp=1776933444 +++ TZ=UTC +++ /usr/sbin/date -d@1776933444 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:37:24 + local 'target_time=2026-04-23 08:37:24' + log 'dropping test collection' + set +o xtrace [2026-04-23T08:37:45+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u8pVNXxf71 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cukWuqMGwr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u8pVNXxf71 ++ cat /tmp/tmp.cukWuqMGwr ++ rm /tmp/tmp.u8pVNXxf71 
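# [editor's note] Once the target_time is fixed and the test collection is dropped, the
# next steps below fill conf/pitr.yml with the restore name, the backup name, and the
# PiTR date via three yq evals, then apply it. The resulting object plausibly looks like
# the following sketch; apiVersion/kind, clusterName, and pitr.type are not visible in
# this trace and are assumptions based on the operator's usual PerconaServerMongoDBRestore
# layout, while the three filled-in fields are taken verbatim from the trace:
cat <<EOF | kubectl apply -f -
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-nfs-logical-pitr
spec:
  clusterName: some-name          # assumed: the cluster under test
  backupName: backup-nfs-logical-pitr
  pitr:
    type: date                    # assumed default for date-based PiTR
    date: "2026-04-23 08:37:24"
EOF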
/tmp/tmp.cukWuqMGwr ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QKKKKRUN29 ++ mktemp + local LAST_ERR=/tmp/tmp.tMOvWtbYCH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QKKKKRUN29 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("585f279c-7d9c-44b7-8cb1-62b202f7564b") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.tMOvWtbYCH + rm /tmp/tmp.QKKKKRUN29 /tmp/tmp.tMOvWtbYCH + return 0 + log 'checking pitr... backup: backup-nfs-logical-pitr target: 2026-04-23 08:37:24' + set +o xtrace [2026-04-23T08:37:48+0000] checking pitr... backup: backup-nfs-logical-pitr target: 2026-04-23 08:37:24 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-logical-pitr"' + yq eval '.spec.backupName = "backup-nfs-logical-pitr"' + yq eval '.spec.pitr.date = "2026-04-23 08:37:24"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.34JDCtLQa2 ++ mktemp + local LAST_ERR=/tmp/tmp.NNW7gU5wzN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.34JDCtLQa2 perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical-pitr created + cat /tmp/tmp.NNW7gU5wzN + rm /tmp/tmp.34JDCtLQa2 /tmp/tmp.NNW7gU5wzN + return 0 + wait_restore backup-nfs-logical-pitr some-name + local backup_name=backup-nfs-logical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical-pitr object to be created.OK Waiting psmdb-restore/restore-backup-nfs-logical-pitr to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t6Te9Eje5e +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ja0NMJujuB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t6Te9Eje5e ++ cat /tmp/tmp.Ja0NMJujuB ++ rm /tmp/tmp.t6Te9Eje5e /tmp/tmp.Ja0NMJujuB ++ return 0 + [[ 
ready == ready ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:38:10+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F9RN3oI2uO +++ mktemp ++ local LAST_ERR=/tmp/tmp.uPyOIxmvjF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F9RN3oI2uO ++ cat /tmp/tmp.uPyOIxmvjF ++ rm /tmp/tmp.F9RN3oI2uO /tmp/tmp.uPyOIxmvjF ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fnC9fTjT7d ++ mktemp + local LAST_ERR=/tmp/tmp.w8P9nfhvmf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fnC9fTjT7d + cat /tmp/tmp.w8P9nfhvmf + rm /tmp/tmp.fnC9fTjT7d /tmp/tmp.w8P9nfhvmf + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.EuB5ISTCpj/find-3rd + desc 'CASE 3: Physical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 3: Physical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical + run_backup nfs backup-nfs-physical physical + local storage=nfs + local backup_name=backup-nfs-physical + local type=physical + log 'running backup backup-nfs-physical' + set +o xtrace [2026-04-23T08:38:13+0000] running backup backup-nfs-physical + kubectl_bin apply -f - + yq eval $'.metadata.name = "backup-nfs-physical"\n\t\t\t| .spec.storageName = "nfs"\n\t\t\t| .spec.type = 
"physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/backup-nfs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.mSq7K8NJHl ++ mktemp + local LAST_ERR=/tmp/tmp.IJxGJF0qDh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mSq7K8NJHl perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical created + cat /tmp/tmp.IJxGJF0qDh + rm /tmp/tmp.mSq7K8NJHl /tmp/tmp.IJxGJF0qDh + return 0 + wait_backup backup-nfs-physical + local backup_name=backup-nfs-physical + local target_state=ready + set +o xtrace waiting for backup-nfs-physical to reach ready state.......OK + run_recovery_check backup-nfs-physical some-name -4th -3rd + local backup=backup-nfs-physical + local cluster=some-name + local find_prefix_before=-4th + local find_prefix_after=-3rd + write_data 100501 -4th + local x=100501 + local find_prefix=-4th + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hkW9DHHSR6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IxQ3RxghD0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hkW9DHHSR6 ++ cat /tmp/tmp.IxQ3RxghD0 ++ rm /tmp/tmp.hkW9DHHSR6 /tmp/tmp.IxQ3RxghD0 ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NOTvZPpbTP ++ mktemp + local LAST_ERR=/tmp/tmp.kXmRrSh0WG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NOTvZPpbTP Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("40ae1854-4f83-4431-b4e4-5aab450ce997") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.kXmRrSh0WG + rm /tmp/tmp.NOTvZPpbTP /tmp/tmp.kXmRrSh0WG + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 -4th .svc.cluster.local myApp test + local 
command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix=-4th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:38:30+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UDm4cwjkXB +++ mktemp ++ local LAST_ERR=/tmp/tmp.dmklYfqpt3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UDm4cwjkXB ++ cat /tmp/tmp.dmklYfqpt3 ++ rm /tmp/tmp.UDm4cwjkXB /tmp/tmp.dmklYfqpt3 ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kYqBB3hfpX ++ mktemp + local LAST_ERR=/tmp/tmp.6njDzWLCLg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kYqBB3hfpX + cat /tmp/tmp.6njDzWLCLg + rm /tmp/tmp.kYqBB3hfpX /tmp/tmp.6njDzWLCLg + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find-4th.json /tmp/tmp.EuB5ISTCpj/find-4th + run_restore backup-nfs-physical + local backup_name=backup-nfs-physical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/restore.yml + log 'running restore restore-backup-nfs-physical' + set +o xtrace [2026-04-23T08:38:33+0000] running restore restore-backup-nfs-physical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-physical/' + kubectl_bin apply -f - + /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-physical/' ++ mktemp + local LAST_OUT=/tmp/tmp.ULUnU7jKEp ++ mktemp + local LAST_ERR=/tmp/tmp.ppkpL4hjap + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 
1 ']' + break + cat /tmp/tmp.ULUnU7jKEp perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical created + cat /tmp/tmp.ppkpL4hjap + rm /tmp/tmp.ULUnU7jKEp /tmp/tmp.ppkpL4hjap + return 0 + wait_restore backup-nfs-physical some-name + local backup_name=backup-nfs-physical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical object to be created.OK Waiting psmdb-restore/restore-backup-nfs-physical to reach state "ready" .....OK after 4 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wvrykCtBoi +++ mktemp ++ local LAST_ERR=/tmp/tmp.vEIdx8VmAC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wvrykCtBoi ++ cat /tmp/tmp.vEIdx8VmAC ++ rm /tmp/tmp.wvrykCtBoi /tmp/tmp.vEIdx8VmAC ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tOtTSbI9ao +++ mktemp ++ local LAST_ERR=/tmp/tmp.3S1y7RDmSC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tOtTSbI9ao ++ cat /tmp/tmp.3S1y7RDmSC ++ rm /tmp/tmp.tOtTSbI9ao /tmp/tmp.3S1y7RDmSC ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TlSJNs6Oau +++ mktemp ++ local LAST_ERR=/tmp/tmp.KWWQJV4JCd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TlSJNs6Oau ++ cat /tmp/tmp.KWWQJV4JCd ++ rm /tmp/tmp.TlSJNs6Oau /tmp/tmp.KWWQJV4JCd ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EqnjSRMxwm +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z8Tqod86nf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EqnjSRMxwm ++ cat /tmp/tmp.Z8Tqod86nf ++ rm /tmp/tmp.EqnjSRMxwm /tmp/tmp.Z8Tqod86nf ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . 
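# [editor's note] The restore being waited on here (restore-backup-nfs-physical) was
# built above by two sed substitutions on conf/restore.yml (filling `name:` and
# `backupName:`). A sketch of the manifest that `kubectl apply -f -` received;
# apiVersion/kind and clusterName are assumptions based on the operator's CRDs, the two
# substituted fields come from this trace:
cat <<EOF | kubectl apply -f -
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name: restore-backup-nfs-physical
spec:
  clusterName: some-name   # assumed: the cluster under test
  backupName: backup-nfs-physical
EOF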
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xv3MEW3ddB +++ mktemp ++ local LAST_ERR=/tmp/tmp.zgYFUkUUfZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xv3MEW3ddB ++ cat /tmp/tmp.zgYFUkUUfZ ++ rm /tmp/tmp.xv3MEW3ddB /tmp/tmp.zgYFUkUUfZ ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IKa7nSsb8F +++ mktemp ++ local LAST_ERR=/tmp/tmp.hLnLDF1Kta ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IKa7nSsb8F ++ cat /tmp/tmp.hLnLDF1Kta ++ rm /tmp/tmp.IKa7nSsb8F /tmp/tmp.hLnLDF1Kta ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SOHhy8tJPZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.J7ORsQK4pS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SOHhy8tJPZ ++ cat /tmp/tmp.J7ORsQK4pS ++ rm /tmp/tmp.SOHhy8tJPZ /tmp/tmp.J7ORsQK4pS ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Swig4XYoFH +++ mktemp ++ local LAST_ERR=/tmp/tmp.BP4C9jYZyY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Swig4XYoFH ++ cat /tmp/tmp.BP4C9jYZyY ++ rm /tmp/tmp.Swig4XYoFH /tmp/tmp.BP4C9jYZyY ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2hb3OVcQai +++ mktemp ++ local LAST_ERR=/tmp/tmp.9QR2dyxht6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2hb3OVcQai ++ cat /tmp/tmp.9QR2dyxht6 ++ rm /tmp/tmp.2hb3OVcQai /tmp/tmp.9QR2dyxht6 ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.917IaV17xG +++ mktemp ++ local LAST_ERR=/tmp/tmp.4CMPfgDaFw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.917IaV17xG ++ cat /tmp/tmp.4CMPfgDaFw ++ rm /tmp/tmp.917IaV17xG /tmp/tmp.4CMPfgDaFw ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . 
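# [editor's note] During this physical restore the cluster state briefly reports "error"
# (retries 5 through 8 above) before returning to "initializing" and finally "ready";
# the wait loop tolerates that by testing only for the terminal state. A minimal sketch
# of the loop as reconstructed from the trace (the 32-retry limit, 7s/10s sleeps, and
# the jsonpath query are from the trace; the function body is an assumption):
wait_cluster_consistency() {
    local cluster_name=$1
    local retry=0
    sleep 7
    echo -n 'waiting for cluster readiness'
    # transient "error"/"initializing" states are expected while pods restart
    until [[ $(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') == ready ]]; do
        retry=$((retry + 1))
        if [[ $retry -ge 32 ]]; then
            echo "cluster $cluster_name never reached ready" >&2
            return 1
        fi
        echo -n .
        sleep 10
    done
    echo .OK
}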
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XvJrhprdmZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.zk2VsR3DBh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XvJrhprdmZ ++ cat /tmp/tmp.zk2VsR3DBh ++ rm /tmp/tmp.XvJrhprdmZ /tmp/tmp.zk2VsR3DBh ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3mBIetth4V +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Ia9Endbl1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3mBIetth4V ++ cat /tmp/tmp.2Ia9Endbl1 ++ rm /tmp/tmp.3mBIetth4V /tmp/tmp.2Ia9Endbl1 ++ return 0 + [[ ready == ready ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:44:57+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8FMon8gJT2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.lb0irvQUh8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8FMon8gJT2 ++ cat /tmp/tmp.lb0irvQUh8 ++ rm /tmp/tmp.8FMon8gJT2 /tmp/tmp.lb0irvQUh8 ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.dXJOc4vBu9 ++ mktemp + local LAST_ERR=/tmp/tmp.TI0myWyOVV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dXJOc4vBu9 + cat /tmp/tmp.TI0myWyOVV + rm /tmp/tmp.dXJOc4vBu9 /tmp/tmp.TI0myWyOVV + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.EuB5ISTCpj/find-3rd + desc 'CASE 4: Physical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 4: Physical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical-pitr + run_backup nfs backup-nfs-physical-pitr physical + local storage=nfs + local backup_name=backup-nfs-physical-pitr + local type=physical + log 'running backup backup-nfs-physical-pitr' + set +o xtrace [2026-04-23T08:44:59+0000] running backup backup-nfs-physical-pitr + yq eval $'.metadata.name = "backup-nfs-physical-pitr"\n\t\t\t| .spec.storageName = "nfs"\n\t\t\t| .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.zfuO6gmwwl ++ mktemp + local LAST_ERR=/tmp/tmp.UrBtdKphXA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zfuO6gmwwl perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr created + cat /tmp/tmp.UrBtdKphXA + rm /tmp/tmp.zfuO6gmwwl /tmp/tmp.UrBtdKphXA + return 0 + wait_backup backup-nfs-physical-pitr + local backup_name=backup-nfs-physical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-physical-pitr to reach ready state.......OK + write_data 100503 -5th + local x=100503 + local find_prefix=-5th + run_mongo 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VdT8oUwYMh +++ mktemp ++ local LAST_ERR=/tmp/tmp.CJNCvgug2K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VdT8oUwYMh ++ cat /tmp/tmp.CJNCvgug2K ++ rm /tmp/tmp.VdT8oUwYMh /tmp/tmp.CJNCvgug2K ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.U0coPXYCSw ++ mktemp + local LAST_ERR=/tmp/tmp.FxxZsvXAiK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U0coPXYCSw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("06821dc1-bfc4-4409-ba49-fcb5dd3b256e") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.FxxZsvXAiK + rm /tmp/tmp.U0coPXYCSw /tmp/tmp.FxxZsvXAiK + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:45:17+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L8QGerR6oX +++ mktemp ++ local LAST_ERR=/tmp/tmp.xQ5Kf69LiR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L8QGerR6oX ++ cat /tmp/tmp.xQ5Kf69LiR ++ rm /tmp/tmp.L8QGerR6oX /tmp/tmp.xQ5Kf69LiR ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cGzPL9Q3Y9 ++ mktemp + local LAST_ERR=/tmp/tmp.M6Vizw9eEB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cGzPL9Q3Y9 + cat /tmp/tmp.M6Vizw9eEB + rm 
/tmp/tmp.cGzPL9Q3Y9 /tmp/tmp.M6Vizw9eEB + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.EuB5ISTCpj/find-5th + run_pitr_check backup-nfs-physical-pitr some-name -5th + local backup=backup-nfs-physical-pitr + local cluster=some-name + local find_prefix=-5th + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gRcptyqae5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kx9xxURnRZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gRcptyqae5 ++ cat /tmp/tmp.Kx9xxURnRZ ++ rm /tmp/tmp.gRcptyqae5 /tmp/tmp.Kx9xxURnRZ ++ return 0 + local backup_last_write=1776933906 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4xBm5CvBZg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Rf3A8LrrFF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4xBm5CvBZg +++ cat /tmp/tmp.Rf3A8LrrFF +++ rm /tmp/tmp.4xBm5CvBZg /tmp/tmp.Rf3A8LrrFF +++ return 0 ++ echo 1776933444 + local last_chunk=1776933444 + [[ 1776933444 -gt 1776933906 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cAux5KpUqe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yODJ6a4Pgw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cAux5KpUqe +++ cat /tmp/tmp.yODJ6a4Pgw +++ rm /tmp/tmp.cAux5KpUqe /tmp/tmp.yODJ6a4Pgw +++ return 0 ++ echo 1776933444 + last_chunk=1776933444 + retries=1 ++ format_date 1776933444 ++ local timestamp=1776933444 +++ TZ=UTC +++ /usr/sbin/date -d@1776933444 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:37:24 ++ format_date 1776933906 ++ local timestamp=1776933906 +++ TZ=UTC +++ /usr/sbin/date -d@1776933906 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:45:06 + log 'Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06)' + set +o xtrace [2026-04-23T08:45:25+0000] Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06) + sleep 10 + [[ 1776933444 -gt 1776933906 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.o8vyBT76MW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eOqs04TklM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ 
exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.o8vyBT76MW +++ cat /tmp/tmp.eOqs04TklM +++ rm /tmp/tmp.o8vyBT76MW /tmp/tmp.eOqs04TklM +++ return 0 ++ echo 1776933444 + last_chunk=1776933444 + retries=2 ++ format_date 1776933444 ++ local timestamp=1776933444 +++ TZ=UTC +++ /usr/sbin/date -d@1776933444 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:37:24 ++ format_date 1776933906 ++ local timestamp=1776933906 +++ TZ=UTC +++ /usr/sbin/date -d@1776933906 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:45:06 + log 'Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06)' + set +o xtrace [2026-04-23T08:45:37+0000] Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06) + sleep 10 + [[ 1776933444 -gt 1776933906 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.B86m51Le02 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NzsgpxlziW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.B86m51Le02 +++ cat /tmp/tmp.NzsgpxlziW +++ rm /tmp/tmp.B86m51Le02 /tmp/tmp.NzsgpxlziW +++ return 0 ++ echo 1776933444 + last_chunk=1776933444 + retries=3 ++ format_date 1776933444 ++ local timestamp=1776933444 +++ TZ=UTC +++ /usr/sbin/date -d@1776933444 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:37:24 ++ format_date 1776933906 ++ local timestamp=1776933906 +++ TZ=UTC +++ /usr/sbin/date -d@1776933906 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:45:06 + log 'Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06)' + set +o xtrace [2026-04-23T08:45:49+0000] Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06) + sleep 10 + [[ 1776933444 -gt 1776933906 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5KPXVYDHSk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ulcHnPhSJc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5KPXVYDHSk +++ cat /tmp/tmp.ulcHnPhSJc +++ rm /tmp/tmp.5KPXVYDHSk /tmp/tmp.ulcHnPhSJc +++ return 0 ++ echo 1776933444 + last_chunk=1776933444 + retries=4 ++ format_date 1776933444 ++ local timestamp=1776933444 +++ TZ=UTC +++ /usr/sbin/date -d@1776933444 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:37:24 ++ format_date 1776933906 ++ local timestamp=1776933906 +++ TZ=UTC +++ /usr/sbin/date -d@1776933906 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:45:06 + log 'Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06)' + set +o xtrace [2026-04-23T08:46:01+0000] Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06) + sleep 10 + [[ 1776933444 -gt 1776933906 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local 
cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ug0NvS3p0Q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jO9zLFKJXU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ug0NvS3p0Q +++ cat /tmp/tmp.jO9zLFKJXU +++ rm /tmp/tmp.ug0NvS3p0Q /tmp/tmp.jO9zLFKJXU +++ return 0 ++ echo 1776933444 + last_chunk=1776933444 + retries=5 ++ format_date 1776933444 ++ local timestamp=1776933444 +++ TZ=UTC +++ /usr/sbin/date -d@1776933444 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:37:24 ++ format_date 1776933906 ++ local timestamp=1776933906 +++ TZ=UTC +++ /usr/sbin/date -d@1776933906 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:45:06 + log 'Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06)' + set +o xtrace [2026-04-23T08:46:13+0000] Waiting for last oplog chunk (2026-04-23 08:37:24) to be greater than last write (2026-04-23 08:45:06) + sleep 10 + [[ 1776933444 -gt 1776933906 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pLQYCnJ95x ++++ mktemp +++ local LAST_ERR=/tmp/tmp.H8gnxkLTnB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pLQYCnJ95x +++ cat /tmp/tmp.H8gnxkLTnB +++ rm /tmp/tmp.pLQYCnJ95x /tmp/tmp.H8gnxkLTnB +++ return 0 ++ echo 1776933981 + last_chunk=1776933981 + retries=6 ++ format_date 1776933981 ++ local timestamp=1776933981 +++ TZ=UTC +++ /usr/sbin/date -d@1776933981 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:46:21 ++ format_date 1776933906 ++ local timestamp=1776933906 +++ TZ=UTC +++ /usr/sbin/date -d@1776933906 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:45:06 + log 'Waiting for last oplog chunk (2026-04-23 08:46:21) to be greater than last write (2026-04-23 08:45:06)' + set +o xtrace [2026-04-23T08:46:25+0000] Waiting for last oplog chunk (2026-04-23 08:46:21) to be greater than last write (2026-04-23 08:45:06) + sleep 10 + [[ 1776933981 -gt 1776933906 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hLnX0dkxcG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.z1A3AxPdAj ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hLnX0dkxcG ++++ cat /tmp/tmp.z1A3AxPdAj ++++ rm /tmp/tmp.hLnX0dkxcG /tmp/tmp.z1A3AxPdAj ++++ return 0 +++ echo 1776933981 ++ format_date 1776933981 ++ local timestamp=1776933981 +++ TZ=UTC +++ /usr/sbin/date -d@1776933981 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-04-23 08:46:21 + local 'target_time=2026-04-23 08:46:21' + log 'dropping test collection' + set +o xtrace [2026-04-23T08:46:37+0000] dropping test 
collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qrt8A8BozS +++ mktemp ++ local LAST_ERR=/tmp/tmp.0nF2mO5cNp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qrt8A8BozS ++ cat /tmp/tmp.0nF2mO5cNp ++ rm /tmp/tmp.Qrt8A8BozS /tmp/tmp.0nF2mO5cNp ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.aExGmxhYHu ++ mktemp + local LAST_ERR=/tmp/tmp.FZmToNk4l4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aExGmxhYHu Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-27812.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c0788b52-43e2-40bb-aac5-82356d579b33") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.FZmToNk4l4 + rm /tmp/tmp.aExGmxhYHu /tmp/tmp.FZmToNk4l4 + return 0 + log 'checking pitr... backup: backup-nfs-physical-pitr target: 2026-04-23 08:46:21' + set +o xtrace [2026-04-23T08:46:39+0000] checking pitr... 
backup: backup-nfs-physical-pitr target: 2026-04-23 08:46:21 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-physical-pitr"' + yq eval '.spec.backupName = "backup-nfs-physical-pitr"' + yq eval '.spec.pitr.date = "2026-04-23 08:46:21"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.kMoRWmRTAV ++ mktemp + local LAST_ERR=/tmp/tmp.Xg9Mp4yq5p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kMoRWmRTAV perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical-pitr created + cat /tmp/tmp.Xg9Mp4yq5p + rm /tmp/tmp.kMoRWmRTAV /tmp/tmp.Xg9Mp4yq5p + return 0 + wait_restore backup-nfs-physical-pitr some-name + local backup_name=backup-nfs-physical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical-pitr object to be created.OK Waiting for psmdb-restore/restore-backup-nfs-physical-pitr to reach state "ready" ......OK after 5 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readiness' waiting for cluster readiness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LBGy8fCQ17 +++ mktemp ++ local LAST_ERR=/tmp/tmp.L6aOwA1Fze ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LBGy8fCQ17 ++ cat /tmp/tmp.L6aOwA1Fze ++ rm /tmp/tmp.LBGy8fCQ17 /tmp/tmp.L6aOwA1Fze ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oHzeQoQbau +++ mktemp ++ local LAST_ERR=/tmp/tmp.JZLhZviFp0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oHzeQoQbau ++ cat /tmp/tmp.JZLhZviFp0 ++ rm /tmp/tmp.oHzeQoQbau /tmp/tmp.JZLhZviFp0 ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KWQMubd3GV +++ mktemp ++ local LAST_ERR=/tmp/tmp.nsLJ4wNyiC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KWQMubd3GV ++ cat /tmp/tmp.nsLJ4wNyiC ++ rm /tmp/tmp.KWQMubd3GV /tmp/tmp.nsLJ4wNyiC ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n .
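For reference: the trace above builds the PITR restore object by piping the base manifest (e2e-tests/demand-backup-fs/conf/pitr.yml) through three yq eval edits — the restore name, spec.backupName, and the spec.pitr.date target derived from the latest oplog chunk — and applying the result in one pipeline. A minimal standalone sketch of that templating step, using the values seen in the trace (paths shortened; anything not shown in the log is illustrative):

    #!/bin/bash
    set -o errexit

    backup_name="backup-nfs-physical-pitr"   # value from the trace
    target_time="2026-04-23 08:46:21"        # latest oplog chunk end, UTC

    # Build the PerconaServerMongoDBRestore manifest from the base pitr.yml
    # and apply it in a single pipeline, as the test helper does above.
    cat e2e-tests/demand-backup-fs/conf/pitr.yml \
        | yq eval ".metadata.name = \"restore-${backup_name}\"" \
        | yq eval ".spec.backupName = \"${backup_name}\"" \
        | yq eval ".spec.pitr.date = \"${target_time}\"" \
        | kubectl apply -f -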
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZUSKdEhtBz +++ mktemp ++ local LAST_ERR=/tmp/tmp.8ivJT3DJpe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZUSKdEhtBz ++ cat /tmp/tmp.8ivJT3DJpe ++ rm /tmp/tmp.ZUSKdEhtBz /tmp/tmp.8ivJT3DJpe ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SJuxUb5ye9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.842cumRYBQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SJuxUb5ye9 ++ cat /tmp/tmp.842cumRYBQ ++ rm /tmp/tmp.SJuxUb5ye9 /tmp/tmp.842cumRYBQ ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6UhaYiLtAo +++ mktemp ++ local LAST_ERR=/tmp/tmp.AWWaEtUwY1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6UhaYiLtAo ++ cat /tmp/tmp.AWWaEtUwY1 ++ rm /tmp/tmp.6UhaYiLtAo /tmp/tmp.AWWaEtUwY1 ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uMfN6GcGWm +++ mktemp ++ local LAST_ERR=/tmp/tmp.inPyuRfPPm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uMfN6GcGWm ++ cat /tmp/tmp.inPyuRfPPm ++ rm /tmp/tmp.uMfN6GcGWm /tmp/tmp.inPyuRfPPm ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X4rdLjPYno +++ mktemp ++ local LAST_ERR=/tmp/tmp.geP52aoeQL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X4rdLjPYno ++ cat /tmp/tmp.geP52aoeQL ++ rm /tmp/tmp.X4rdLjPYno /tmp/tmp.geP52aoeQL ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uHFNM8f9H2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rqCqZ8p6f0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uHFNM8f9H2 ++ cat /tmp/tmp.rqCqZ8p6f0 ++ rm /tmp/tmp.uHFNM8f9H2 /tmp/tmp.rqCqZ8p6f0 ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mpNREnhs9v +++ mktemp ++ local LAST_ERR=/tmp/tmp.UCVG5LXgZt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mpNREnhs9v ++ cat /tmp/tmp.UCVG5LXgZt ++ rm /tmp/tmp.mpNREnhs9v /tmp/tmp.UCVG5LXgZt ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oPwlNfqNKn +++ mktemp ++ local LAST_ERR=/tmp/tmp.89LSId6zkN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oPwlNfqNKn ++ cat /tmp/tmp.89LSId6zkN ++ rm /tmp/tmp.oPwlNfqNKn /tmp/tmp.89LSId6zkN ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xaaDjVXAmf +++ mktemp ++ local LAST_ERR=/tmp/tmp.xfnoA1DmMM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xaaDjVXAmf ++ cat /tmp/tmp.xfnoA1DmMM ++ rm /tmp/tmp.xaaDjVXAmf /tmp/tmp.xfnoA1DmMM ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v8Kdk0ugQa +++ mktemp ++ local LAST_ERR=/tmp/tmp.hCcB617vr0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v8Kdk0ugQa ++ cat /tmp/tmp.hCcB617vr0 ++ rm /tmp/tmp.v8Kdk0ugQa /tmp/tmp.hCcB617vr0 ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xCxLuiHPtt +++ mktemp ++ local LAST_ERR=/tmp/tmp.pG7b2AE7L6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xCxLuiHPtt ++ cat /tmp/tmp.pG7b2AE7L6 ++ rm /tmp/tmp.xCxLuiHPtt /tmp/tmp.pG7b2AE7L6 ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 14 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zO94Si2riu +++ mktemp ++ local LAST_ERR=/tmp/tmp.hViQjDe4Ed ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zO94Si2riu ++ cat /tmp/tmp.hViQjDe4Ed ++ rm /tmp/tmp.zO94Si2riu /tmp/tmp.hViQjDe4Ed ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 15 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.buBKAMCpEt +++ mktemp ++ local LAST_ERR=/tmp/tmp.6XXtP4Hude ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.buBKAMCpEt ++ cat /tmp/tmp.6XXtP4Hude ++ rm /tmp/tmp.buBKAMCpEt /tmp/tmp.6XXtP4Hude ++ return 0 + [[ error == ready ]] + let retry+=1 + '[' 16 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J4Ikqn3vqg +++ mktemp ++ local LAST_ERR=/tmp/tmp.YPinj72WXQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J4Ikqn3vqg ++ cat /tmp/tmp.YPinj72WXQ ++ rm /tmp/tmp.J4Ikqn3vqg /tmp/tmp.YPinj72WXQ ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 17 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jD3XXd8IPu +++ mktemp ++ local LAST_ERR=/tmp/tmp.rdIBXf9KvE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jD3XXd8IPu ++ cat /tmp/tmp.rdIBXf9KvE ++ rm /tmp/tmp.jD3XXd8IPu /tmp/tmp.rdIBXf9KvE ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 18 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wdEIKJBrSz +++ mktemp ++ local LAST_ERR=/tmp/tmp.r6uKcuogXC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wdEIKJBrSz ++ cat /tmp/tmp.r6uKcuogXC ++ rm /tmp/tmp.wdEIKJBrSz /tmp/tmp.r6uKcuogXC ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 19 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Crp7X94GqF +++ mktemp ++ local LAST_ERR=/tmp/tmp.IYagZi2OvR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Crp7X94GqF ++ cat /tmp/tmp.IYagZi2OvR ++ rm /tmp/tmp.Crp7X94GqF /tmp/tmp.IYagZi2OvR ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 20 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lAbhS2XnTb +++ mktemp ++ local LAST_ERR=/tmp/tmp.YnWbRtTDVQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lAbhS2XnTb ++ cat /tmp/tmp.YnWbRtTDVQ ++ rm /tmp/tmp.lAbhS2XnTb /tmp/tmp.YnWbRtTDVQ ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 21 -ge 32 ']' + echo -n . 
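For reference: the dotted trace around this point is the wait_cluster_consistency helper polling the psmdb object's .status.state every 10 seconds; the state bounces between "error" and "initializing" while the physical PITR restore rebuilds the replica set, and the helper gives up after 32 attempts. A condensed sketch of that polling pattern, assuming only kubectl and the resource name (the helper name, attempt limit, and sleep interval come from the trace; the internal structure is an approximation):

    wait_cluster_consistency() {
        local cluster_name=$1
        local wait_time=${2:-32}   # max attempts, as in the trace
        local retry=0

        echo -n 'waiting for cluster readiness'
        until [[ $(kubectl get psmdb "${cluster_name}" -o 'jsonpath={.status.state}') == "ready" ]]; do
            # Transient "error"/"initializing" states are expected mid-restore,
            # so keep polling until the limit is reached.
            retry=$((retry + 1))
            if [[ ${retry} -ge ${wait_time} ]]; then
                echo "cluster ${cluster_name} did not become ready in time" >&2
                return 1
            fi
            echo -n .
            sleep 10
        done
        echo .OK
    }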
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.15MlpKnN0R +++ mktemp ++ local LAST_ERR=/tmp/tmp.NOjf9KZaY1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.15MlpKnN0R ++ cat /tmp/tmp.NOjf9KZaY1 ++ rm /tmp/tmp.15MlpKnN0R /tmp/tmp.NOjf9KZaY1 ++ return 0 + [[ initializing == ready ]] + let retry+=1 + '[' 22 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZcZKXjogRG +++ mktemp ++ local LAST_ERR=/tmp/tmp.vmIA7qDCfv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZcZKXjogRG ++ cat /tmp/tmp.vmIA7qDCfv ++ rm /tmp/tmp.ZcZKXjogRG /tmp/tmp.vmIA7qDCfv ++ return 0 + [[ ready == ready ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-27812 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T08:56:30+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-27812 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-27812 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-27812 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RST6imy12G +++ mktemp ++ local LAST_ERR=/tmp/tmp.F3dwg7FyYp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RST6imy12G ++ cat /tmp/tmp.F3dwg7FyYp ++ rm /tmp/tmp.RST6imy12G /tmp/tmp.F3dwg7FyYp ++ return 0 + local client_container=psmdb-client-bb8b97679-9h5q5 + kubectl_bin exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.dsuQr48NIx ++ mktemp + local LAST_ERR=/tmp/tmp.GnWyX4p5B6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-9h5q5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-27812.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dsuQr48NIx + cat /tmp/tmp.GnWyX4p5B6 + rm /tmp/tmp.dsuQr48NIx /tmp/tmp.GnWyX4p5B6 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.EuB5ISTCpj/find-5th + destroy demand-backup-fs-27812 + local namespace=demand-backup-fs-27812 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.WZsjEl971R +++ mktemp ++ local LAST_ERR=/tmp/tmp.puQBVPT08W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WZsjEl971R ++ cat /tmp/tmp.puQBVPT08W ++ rm /tmp/tmp.WZsjEl971R /tmp/tmp.puQBVPT08W ++ return 0 + '[' 4 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.eUpcdlctRR ++ mktemp + local LAST_ERR=/tmp/tmp.PeFmds8Fi8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eUpcdlctRR
NAME                       CLUSTER     STORAGE   DESTINATION                     TYPE       SIZE       STATUS   COMPLETED   AGE
backup-nfs-logical         some-name   nfs       /mnt/nfs/2026-04-23T08:35:36Z   logical    40.86KB    ready    20m         20m
backup-nfs-logical-pitr    some-name   nfs       /mnt/nfs/2026-04-23T08:36:22Z   logical    44.68KB    ready    20m         20m
backup-nfs-physical        some-name   nfs       /mnt/nfs/2026-04-23T08:38:15Z   physical   1.48MB     ready    18m         18m
backup-nfs-physical-pitr   some-name   nfs       /mnt/nfs/2026-04-23T08:45:02Z   physical   979.39KB   ready    11m         11m
+ cat /tmp/tmp.PeFmds8Fi8 + rm /tmp/tmp.eUpcdlctRR /tmp/tmp.PeFmds8Fi8 + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.W0LGg5KnfE ++ mktemp + local LAST_ERR=/tmp/tmp.WBMOqdzZXw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W0LGg5KnfE perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical" deleted from demand-backup-fs-27812 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical-pitr" deleted from demand-backup-fs-27812 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical" deleted from demand-backup-fs-27812 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical-pitr" deleted from demand-backup-fs-27812 namespace + cat /tmp/tmp.WBMOqdzZXw + rm /tmp/tmp.W0LGg5KnfE /tmp/tmp.WBMOqdzZXw + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.fep5mtcMO2 ++ mktemp + local LAST_ERR=/tmp/tmp.TbchylMzZR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fep5mtcMO2 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.TbchylMzZR + rm /tmp/tmp.fep5mtcMO2 /tmp/tmp.TbchylMzZR + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.J55vGpdJ3n ++ mktemp + local LAST_ERR=/tmp/tmp.bthgveGkRw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J55vGpdJ3n + cat /tmp/tmp.bthgveGkRw + rm /tmp/tmp.J55vGpdJ3n /tmp/tmp.bthgveGkRw + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Ryd9YKxfAu ++ mktemp + local LAST_ERR=/tmp/tmp.TanHXQJoW7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ryd9YKxfAu + cat /tmp/tmp.TanHXQJoW7 + rm /tmp/tmp.Ryd9YKxfAu /tmp/tmp.TanHXQJoW7 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com 
--all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-27812 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.94JTqlDYK3 ++ mktemp + local LAST_ERR=/tmp/tmp.A9TyDenANo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.94JTqlDYK3 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.A9TyDenANo + rm /tmp/tmp.94JTqlDYK3 /tmp/tmp.A9TyDenANo + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Bqk04hNbPG ++ mktemp + local LAST_ERR=/tmp/tmp.g3kevMNXvd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bqk04hNbPG clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.g3kevMNXvd + rm /tmp/tmp.Bqk04hNbPG /tmp/tmp.g3kevMNXvd + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TG6vPRfdtD ++ mktemp + local LAST_ERR=/tmp/tmp.MSr9IEeQ25 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TG6vPRfdtD + cat /tmp/tmp.MSr9IEeQ25 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TG6vPRfdtD + cat /tmp/tmp.MSr9IEeQ25 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TG6vPRfdtD + cat /tmp/tmp.MSr9IEeQ25 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): 
error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not 
+ sleep 8
+ cat /tmp/tmp.TG6vPRfdtD
+ cat /tmp/tmp.MSr9IEeQ25
[attempt 3 output: the same NotFound errors as above, repeated verbatim for every cert-manager resource]
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.TG6vPRfdtD /tmp/tmp.MSr9IEeQ25 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-fs-27812 + rm -rf /tmp/tmp.EuB5ISTCpj + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed+ local LAST_OUT=/tmp/tmp.hHVPszLdRA ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.wVrSc61YCx ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Ox8g7oByV5 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.zEgca69HPQ + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-fs-27812 + kubectl delete --grace-period=0 --force=true namespace psmdb-operator