Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/logs/demand-backup-fs.log Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=some-name + create_infra demand-backup-fs-16203 + local ns=demand-backup-fs-16203 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.fsnUXJfS5m ++ mktemp + local LAST_ERR=/tmp/tmp.PuVdwlgTvK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fsnUXJfS5m customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.PuVdwlgTvK + rm /tmp/tmp.fsnUXJfS5m /tmp/tmp.PuVdwlgTvK + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-15714 backup-nfs-logical --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-15714 backup-nfs-logical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-15714 backup-nfs-physical --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-15714 backup-nfs-physical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.LgKKY6MMJn ++ mktemp + local LAST_ERR=/tmp/tmp.Puuw4sa8bQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 
']' + break + cat /tmp/tmp.LgKKY6MMJn customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.Puuw4sa8bQ + rm /tmp/tmp.LgKKY6MMJn /tmp/tmp.Puuw4sa8bQ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UJ8nb7PZtS ++ mktemp + local LAST_ERR=/tmp/tmp.z6xX8eqF8Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UJ8nb7PZtS + cat /tmp/tmp.z6xX8eqF8Q + rm /tmp/tmp.UJ8nb7PZtS /tmp/tmp.z6xX8eqF8Q + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-15714 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.g5AUgRtQQu ++ mktemp + local LAST_ERR=/tmp/tmp.vW82imlt9G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g5AUgRtQQu customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.vW82imlt9G + rm /tmp/tmp.g5AUgRtQQu /tmp/tmp.vW82imlt9G + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.P8X7mWCEKV ++ mktemp + local LAST_ERR=/tmp/tmp.3IFPR8L7KB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P8X7mWCEKV clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.3IFPR8L7KB + rm /tmp/tmp.P8X7mWCEKV /tmp/tmp.3IFPR8L7KB + return 0 + check_crd_for_deletion PR-2213-482da7af + local git_tag=PR-2213-482da7af ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2213-482da7af/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g 
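# --- annotation (not captured trace): the delete_crd step above clears finalizers from any
# leftover custom resources before waiting for the CRDs to disappear; without that,
# `kubectl delete -f deploy/crd.yaml --wait=false` would hang on resources that still carry
# finalizers. Below is a minimal standalone sketch of the same pattern, not the test's actual
# delete_crd helper: the CRD names come from deploy/crd.yaml, everything else (loop variable,
# custom-columns layout, timeout) is illustrative.
for crd in perconaservermongodbbackups.psmdb.percona.com \
    perconaservermongodbrestores.psmdb.percona.com \
    perconaservermongodbs.psmdb.percona.com; do
    # list leftover resources as "<namespace> <name>" pairs and empty their finalizers
    kubectl get "$crd" --all-namespaces --no-headers \
        -o custom-columns=NS:.metadata.namespace,NAME:.metadata.name 2>/dev/null \
        | xargs -r -L1 sh -c 'kubectl patch "$0" -n "$1" "$2" --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' "$crd"
    # now the CRD itself can finish deleting
    kubectl wait --for=delete "crd/$crd" --timeout=60s || true
done
# --- end annotation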
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wyDhmam478 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SlEazNwp6N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.wyDhmam478 ++ cat /tmp/tmp.SlEazNwp6N Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.wyDhmam478 ++ cat /tmp/tmp.SlEazNwp6N Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.wyDhmam478 ++ cat /tmp/tmp.SlEazNwp6N Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.wyDhmam478 ++ cat /tmp/tmp.SlEazNwp6N Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.wyDhmam478 /tmp/tmp.SlEazNwp6N ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned 
up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.Did83bS6oZ ++ mktemp + local LAST_OUT=/tmp/tmp.KubZNHa925 + local LAST_ERR=/tmp/tmp.u5GdFfZNfB + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.jTtDf1dqcy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Did83bS6oZ + cat /tmp/tmp.u5GdFfZNfB + rm /tmp/tmp.Did83bS6oZ /tmp/tmp.u5GdFfZNfB + return 0 namespace "demand-backup-fs-15714" deleted namespace "storage" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KubZNHa925 namespace "psmdb-operator" deleted + cat /tmp/tmp.jTtDf1dqcy + rm /tmp/tmp.KubZNHa925 /tmp/tmp.jTtDf1dqcy + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.VrgPaC4Iyk ++ mktemp + local LAST_ERR=/tmp/tmp.X4N9MZq9ab + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VrgPaC4Iyk + cat /tmp/tmp.X4N9MZq9ab + rm /tmp/tmp.VrgPaC4Iyk /tmp/tmp.X4N9MZq9ab + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.VhPJbU1180 ++ mktemp + local LAST_ERR=/tmp/tmp.cz1bcQzEZs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VhPJbU1180 namespace/psmdb-operator created + cat /tmp/tmp.cz1bcQzEZs + rm /tmp/tmp.VhPJbU1180 /tmp/tmp.cz1bcQzEZs + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.90jhe9mtc8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5xl9Bo4yQo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.90jhe9mtc8 ++ cat /tmp/tmp.5xl9Bo4yQo ++ rm /tmp/tmp.90jhe9mtc8 /tmp/tmp.5xl9Bo4yQo ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2213-482da7af-11-cluster12 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.TQS00GRxdz ++ mktemp + local LAST_ERR=/tmp/tmp.Gg9pkqLkDl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-2213-482da7af-11-cluster12 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TQS00GRxdz Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2213-482da7af-11-cluster12" modified. + cat /tmp/tmp.Gg9pkqLkDl + rm /tmp/tmp.TQS00GRxdz /tmp/tmp.Gg9pkqLkDl + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2213-482da7af' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2213-482da7af ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ThJwRc2B7H ++ mktemp + local LAST_ERR=/tmp/tmp.l8Vc95LcMI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ThJwRc2B7H customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.l8Vc95LcMI + rm /tmp/tmp.ThJwRc2B7H /tmp/tmp.l8Vc95LcMI + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9amudSWXiN ++ mktemp + local LAST_ERR=/tmp/tmp.v2JUFEOyNM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9amudSWXiN clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.v2JUFEOyNM + rm /tmp/tmp.9amudSWXiN /tmp/tmp.v2JUFEOyNM + return 0 + kubectl_bin apply -n psmdb-operator -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2213-482da7af") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.7LT6n8bGHH ++ mktemp + local LAST_ERR=/tmp/tmp.1xaM2nTAW7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7LT6n8bGHH deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.1xaM2nTAW7 + rm /tmp/tmp.7LT6n8bGHH /tmp/tmp.1xaM2nTAW7 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.iAvbaVBraV +++ mktemp ++ local LAST_ERR=/tmp/tmp.wB4QM4nTQa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iAvbaVBraV ++ cat /tmp/tmp.wB4QM4nTQa ++ rm /tmp/tmp.iAvbaVBraV /tmp/tmp.wB4QM4nTQa ++ return 0 + wait_operator_pod percona-server-mongodb-operator-68bf6755c6-c7fb4 + local pod=percona-server-mongodb-operator-68bf6755c6-c7fb4 + set +o xtrace waiting for pod/percona-server-mongodb-operator-68bf6755c6-c7fb4 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.iGIbN6NZtX +++ mktemp ++ local LAST_ERR=/tmp/tmp.oRCZ1ygk7f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iGIbN6NZtX ++ cat /tmp/tmp.oRCZ1ygk7f ++ rm /tmp/tmp.iGIbN6NZtX /tmp/tmp.oRCZ1ygk7f ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-68bf6755c6-c7fb4 ++ mktemp + local LAST_OUT=/tmp/tmp.UKULB0uykv ++ mktemp + local LAST_ERR=/tmp/tmp.NSmwACDzH9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-68bf6755c6-c7fb4 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UKULB0uykv + cat /tmp/tmp.NSmwACDzH9 + rm /tmp/tmp.UKULB0uykv /tmp/tmp.NSmwACDzH9 + return 0 2026-01-23T11:51:36.933Z INFO setup Manager starting up {"gitCommit": "482da7afc76cb5e5983e0dafeee205d21b673bb3", "gitBranch": "PR-2213-482da7af", "buildTime": "", "goVersion": "go1.25.6", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-fs-16203 + local namespace=demand-backup-fs-16203 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-fs-16203' + set +o xtrace + awk '{print$1}' ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-fs-16203 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-fs-16203 --ignore-not-found + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.xWlczGfg6Q + local LAST_OUT=/tmp/tmp.76ObgyLK1K ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Nba7mrUzc2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.QKWHzTILoY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-fs-16203 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xWlczGfg6Q + cat /tmp/tmp.Nba7mrUzc2 + rm /tmp/tmp.xWlczGfg6Q /tmp/tmp.Nba7mrUzc2 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.76ObgyLK1K + cat /tmp/tmp.QKWHzTILoY + rm /tmp/tmp.76ObgyLK1K /tmp/tmp.QKWHzTILoY + return 0 + kubectl_bin wait --for=delete namespace demand-backup-fs-16203 ++ mktemp + local LAST_OUT=/tmp/tmp.Yu1HyoKQeD ++ mktemp + local LAST_ERR=/tmp/tmp.dgbJzlu5f8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-fs-16203 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Yu1HyoKQeD + cat /tmp/tmp.dgbJzlu5f8 + rm /tmp/tmp.Yu1HyoKQeD /tmp/tmp.dgbJzlu5f8 + return 0 + desc 'create namespace demand-backup-fs-16203' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-fs-16203 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-fs-16203 ++ mktemp + local LAST_OUT=/tmp/tmp.ThLsoZreFW ++ 
mktemp + local LAST_ERR=/tmp/tmp.DVBh6MJu9l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-fs-16203 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ThLsoZreFW namespace/demand-backup-fs-16203 created + cat /tmp/tmp.DVBh6MJu9l + rm /tmp/tmp.ThLsoZreFW /tmp/tmp.DVBh6MJu9l + return 0 + set_kube_ctx demand-backup-fs-16203 + local namespace=demand-backup-fs-16203 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.oj2mNIspQn +++ mktemp ++ local LAST_ERR=/tmp/tmp.KdJot2XpM2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oj2mNIspQn ++ cat /tmp/tmp.KdJot2XpM2 ++ rm /tmp/tmp.oj2mNIspQn /tmp/tmp.KdJot2XpM2 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2213-482da7af-11-cluster12 --namespace=demand-backup-fs-16203 ++ mktemp + local LAST_OUT=/tmp/tmp.I83GXu27lY ++ mktemp + local LAST_ERR=/tmp/tmp.XeCSXFvS2f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2213-482da7af-11-cluster12 --namespace=demand-backup-fs-16203 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I83GXu27lY Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2213-482da7af-11-cluster12" modified. + cat /tmp/tmp.XeCSXFvS2f + rm /tmp/tmp.I83GXu27lY /tmp/tmp.XeCSXFvS2f + return 0 + kubectl_bin delete ns storage ++ mktemp + local LAST_OUT=/tmp/tmp.giBgaZiySj ++ mktemp + local LAST_ERR=/tmp/tmp.uhXvffz9UV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.giBgaZiySj + cat /tmp/tmp.uhXvffz9UV Error from server (NotFound): namespaces "storage" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.giBgaZiySj + cat /tmp/tmp.uhXvffz9UV Error from server (NotFound): namespaces "storage" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.giBgaZiySj + cat /tmp/tmp.uhXvffz9UV Error from server (NotFound): namespaces "storage" not found + sleep 8 + cat /tmp/tmp.giBgaZiySj + cat /tmp/tmp.uhXvffz9UV Error from server (NotFound): namespaces "storage" not found + rm /tmp/tmp.giBgaZiySj /tmp/tmp.uhXvffz9UV + return 1 + : + [[ 1 != 1 ]] + uid=1001 + [[ -n '' ]] + log 'deploying NFS server' + set +o xtrace [2026-01-23T11:52:25+0000] deploying NFS server + deploy_nfs_server 1001 + local uid=1001 + kubectl_bin create namespace storage ++ mktemp + local LAST_OUT=/tmp/tmp.jYQQYPfD37 ++ mktemp + local LAST_ERR=/tmp/tmp.k3IHDNpkWk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace storage + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jYQQYPfD37 namespace/storage created + cat /tmp/tmp.k3IHDNpkWk + rm /tmp/tmp.jYQQYPfD37 /tmp/tmp.k3IHDNpkWk + return 0 + kubectl_bin apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/nfs-server.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ymAdarqQXI ++ mktemp + local LAST_ERR=/tmp/tmp.KyhJJRcC7P + local exit_status=0 + local timeout=4 ++ seq 
0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/nfs-server.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ymAdarqQXI serviceaccount/nfs-server created rolebinding.rbac.authorization.k8s.io/system:openshift:scc:privileged created persistentvolumeclaim/nfs-pvc created deployment.apps/nfs-server created service/nfs-service created + cat /tmp/tmp.KyhJJRcC7P + rm /tmp/tmp.ymAdarqQXI /tmp/tmp.KyhJJRcC7P + return 0 + sleep 5 ++ kubectl_bin get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NKPDYOAkRY +++ mktemp ++ local LAST_ERR=/tmp/tmp.sq4rYmvtxB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NKPDYOAkRY ++ cat /tmp/tmp.sq4rYmvtxB ++ rm /tmp/tmp.NKPDYOAkRY /tmp/tmp.sq4rYmvtxB ++ return 0 + local nfsPod=nfs-server-64ccf79cf6-26gr2 ++ kubectl_bin get pod nfs-server-64ccf79cf6-26gr2 -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EZ0cUADXbo +++ mktemp ++ local LAST_ERR=/tmp/tmp.lLg2j59KC7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-26gr2 -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EZ0cUADXbo ++ cat /tmp/tmp.lLg2j59KC7 ++ rm /tmp/tmp.EZ0cUADXbo /tmp/tmp.lLg2j59KC7 ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-64ccf79cf6-26gr2 to start Running' + set +o xtrace [2026-01-23T11:52:36+0000] Waiting for nfs-server-64ccf79cf6-26gr2 to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-64ccf79cf6-26gr2 -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lev2FUxxQk +++ mktemp ++ local LAST_ERR=/tmp/tmp.252mhvEHQL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-26gr2 -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lev2FUxxQk ++ cat /tmp/tmp.252mhvEHQL ++ rm /tmp/tmp.lev2FUxxQk /tmp/tmp.252mhvEHQL ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-64ccf79cf6-26gr2 to start Running' + set +o xtrace [2026-01-23T11:52:39+0000] Waiting for nfs-server-64ccf79cf6-26gr2 to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-64ccf79cf6-26gr2 -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VzVCI9TV8e +++ mktemp ++ local LAST_ERR=/tmp/tmp.jaTEGlex6c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-26gr2 -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VzVCI9TV8e ++ cat /tmp/tmp.jaTEGlex6c ++ rm /tmp/tmp.VzVCI9TV8e /tmp/tmp.jaTEGlex6c ++ return 0 + [[ Running == \R\u\n\n\i\n\g ]] + kubectl_bin exec -n storage nfs-server-64ccf79cf6-26gr2 -- mkdir /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.IRMiE8M7RX ++ mktemp + local LAST_ERR=/tmp/tmp.iJiqTUYQeg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-64ccf79cf6-26gr2 -- mkdir 
/exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IRMiE8M7RX + cat /tmp/tmp.iJiqTUYQeg + rm /tmp/tmp.IRMiE8M7RX /tmp/tmp.iJiqTUYQeg + return 0 + kubectl_bin exec -n storage nfs-server-64ccf79cf6-26gr2 -- chown 1001:1001 /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.mh0ARI9X21 ++ mktemp + local LAST_ERR=/tmp/tmp.1pKm0uuhen + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-64ccf79cf6-26gr2 -- chown 1001:1001 /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mh0ARI9X21 + cat /tmp/tmp.1pKm0uuhen + rm /tmp/tmp.mh0ARI9X21 /tmp/tmp.1pKm0uuhen + return 0 + log 'creating secrets and start client' + set +o xtrace [2026-01-23T11:52:44+0000] creating secrets and start client + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.FMG4uotst0 ++ mktemp + local LAST_ERR=/tmp/tmp.qoGuuX4nUO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FMG4uotst0 secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.qoGuuX4nUO + rm /tmp/tmp.FMG4uotst0 /tmp/tmp.qoGuuX4nUO + return 0 + [[ -n '' ]] + log 'creating PSMDB cluster some-name' + set +o xtrace [2026-01-23T11:52:46+0000] creating PSMDB cluster some-name + [[ 1 != 1 ]] + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2213-482da7af"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + /usr/sbin/sed -e s/NAME_SPACE/demand-backup-fs-16203/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.HnO5yeg4af ++ mktemp + local LAST_ERR=/tmp/tmp.ddpPmHNgQM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HnO5yeg4af perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.ddpPmHNgQM + rm /tmp/tmp.HnO5yeg4af /tmp/tmp.ddpPmHNgQM + return 0 + log 'wait for all 3 pods to start' + set +o xtrace [2026-01-23T11:52:49+0000] wait for all 3 pods to start + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o 
xtrace waiting for pod/some-name-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NYWFMH5NSm +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gfxd8L0E10 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NYWFMH5NSm ++ cat /tmp/tmp.Gfxd8L0E10 ++ rm /tmp/tmp.NYWFMH5NSm /tmp/tmp.Gfxd8L0E10 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WM6HU6KXZA +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pc04EqmFfO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WM6HU6KXZA ++ cat /tmp/tmp.Pc04EqmFfO ++ rm /tmp/tmp.WM6HU6KXZA /tmp/tmp.Pc04EqmFfO ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JIldNJG0hi +++ mktemp ++ local LAST_ERR=/tmp/tmp.1Imb95ZGGY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JIldNJG0hi ++ cat /tmp/tmp.1Imb95ZGGY ++ rm /tmp/tmp.JIldNJG0hi /tmp/tmp.1Imb95ZGGY ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................... + [[ 1 == 1 ]] + log 'checking if statefulset created with expected config' + set +o xtrace [2026-01-23T11:54:53+0000] checking if statefulset created with expected config + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.xOEYGWTFTX/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. 
| select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-fs-16203", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.xHY98y3CSO ++ mktemp + local LAST_ERR=/tmp/tmp.0O8wVnBRht + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xHY98y3CSO + cat /tmp/tmp.0O8wVnBRht + rm /tmp/tmp.xHY98y3CSO /tmp/tmp.0O8wVnBRht + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.xOEYGWTFTX/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.xOEYGWTFTX/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.xOEYGWTFTX/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml /tmp/tmp.xOEYGWTFTX/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2026-01-23T11:54:54+0000] compare_kubectl: statefulset/some-name-rs0 OK + log 'creating user' + set +o xtrace [2026-01-23T11:54:54+0000] creating user + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-16203 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.91NlR2Qu08 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3QbOL12WWm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.91NlR2Qu08 ++ cat /tmp/tmp.3QbOL12WWm ++ rm /tmp/tmp.91NlR2Qu08 /tmp/tmp.3QbOL12WWm ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.MDtI7Iyi3v ++ mktemp + local LAST_ERR=/tmp/tmp.lAo4CwBv72 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MDtI7Iyi3v Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0530bd4b-ab2e-4506-98b3-3311237ad46e") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.lAo4CwBv72 + rm /tmp/tmp.MDtI7Iyi3v /tmp/tmp.lAo4CwBv72 + return 0 + sleep 2 + log 'write initial data' + set +o xtrace [2026-01-23T11:54:59+0000] write initial data + write_data 100500 '' + local x=100500 + local find_prefix= + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aC8IAqY66o +++ mktemp ++ local LAST_ERR=/tmp/tmp.BEUawsuZhA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aC8IAqY66o ++ cat /tmp/tmp.BEUawsuZhA ++ rm /tmp/tmp.aC8IAqY66o /tmp/tmp.BEUawsuZhA ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ 
mktemp + local LAST_OUT=/tmp/tmp.n7H5QcRbzw ++ mktemp + local LAST_ERR=/tmp/tmp.zSVRxWlMY8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n7H5QcRbzw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("dfe86781-7a4a-4104-94c5-a5fae70bfb15") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.zSVRxWlMY8 + rm /tmp/tmp.n7H5QcRbzw /tmp/tmp.zSVRxWlMY8 + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T11:55:01+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X78ewEXGsv +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mpz2RyCOgC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X78ewEXGsv ++ cat /tmp/tmp.Mpz2RyCOgC ++ rm /tmp/tmp.X78ewEXGsv /tmp/tmp.Mpz2RyCOgC ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EHBMp8wd6D ++ mktemp + local LAST_ERR=/tmp/tmp.EP3ZawmVMc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf 
'\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EHBMp8wd6D + cat /tmp/tmp.EP3ZawmVMc + rm /tmp/tmp.EHBMp8wd6D /tmp/tmp.EP3ZawmVMc + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.xOEYGWTFTX/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-01-23T11:54:11.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-01-23T11:54:49.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-01-23T11:54:51.000+0000 I listening for the commands + desc 'CASE 1: Logical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: Logical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical + run_backup nfs backup-nfs-logical logical + local storage=nfs + local backup_name=backup-nfs-logical + local type=logical + log 'running backup backup-nfs-logical' + set +o xtrace [2026-01-23T11:55:09+0000] running backup backup-nfs-logical + yq eval '.metadata.name = "backup-nfs-logical" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Jh3Hiez7tW ++ mktemp + local LAST_ERR=/tmp/tmp.eGrZP7xihS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Jh3Hiez7tW perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical created + cat /tmp/tmp.eGrZP7xihS + rm /tmp/tmp.Jh3Hiez7tW /tmp/tmp.eGrZP7xihS + return 0 + wait_backup backup-nfs-logical + local backup_name=backup-nfs-logical + local target_state=ready + set +o xtrace waiting for backup-nfs-logical to reach ready state.......OK + run_recovery_check backup-nfs-logical some-name -2nd '' + local backup=backup-nfs-logical + local cluster=some-name + local find_prefix_before=-2nd + local find_prefix_after= + write_data 100501 -2nd + local x=100501 + local find_prefix=-2nd + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GLuDJKGPgL +++ mktemp ++ local LAST_ERR=/tmp/tmp.efken4QSx8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GLuDJKGPgL ++ 
cat /tmp/tmp.efken4QSx8 ++ rm /tmp/tmp.GLuDJKGPgL /tmp/tmp.efken4QSx8 ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8woq7slbez ++ mktemp + local LAST_ERR=/tmp/tmp.hd97tnV2pM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8woq7slbez Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3af0909d-ba8c-4284-9f46-1f0842da9e5e") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.hd97tnV2pM + rm /tmp/tmp.8woq7slbez /tmp/tmp.hd97tnV2pM + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T11:55:25+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gp8ktMNuhi +++ mktemp ++ local LAST_ERR=/tmp/tmp.u20RiBSdgp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gp8ktMNuhi ++ cat /tmp/tmp.u20RiBSdgp ++ rm /tmp/tmp.Gp8ktMNuhi /tmp/tmp.u20RiBSdgp ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf 
'\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZmrvQcUV89 ++ mktemp + local LAST_ERR=/tmp/tmp.qIG49C4vtY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZmrvQcUV89 + cat /tmp/tmp.qIG49C4vtY + rm /tmp/tmp.ZmrvQcUV89 /tmp/tmp.qIG49C4vtY + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find-2nd.json /tmp/tmp.xOEYGWTFTX/find-2nd + run_restore backup-nfs-logical + local backup_name=backup-nfs-logical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/restore.yml + log 'running restore restore-backup-nfs-logical' + set +o xtrace [2026-01-23T11:55:28+0000] running restore restore-backup-nfs-logical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-logical/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-logical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.wiYYkyFabj ++ mktemp + local LAST_ERR=/tmp/tmp.VN2rxY2nJ3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wiYYkyFabj perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical created + cat /tmp/tmp.VN2rxY2nJ3 + rm /tmp/tmp.wiYYkyFabj /tmp/tmp.VN2rxY2nJ3 + return 0 + wait_restore backup-nfs-logical some-name + local backup_name=backup-nfs-logical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical object to be created.OK Waiting psmdb-restore/restore-backup-nfs-logical to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WgvDEMoXkB +++ mktemp ++ local LAST_ERR=/tmp/tmp.8g6Nohhfv6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WgvDEMoXkB ++ cat /tmp/tmp.8g6Nohhfv6 ++ rm /tmp/tmp.WgvDEMoXkB /tmp/tmp.8g6Nohhfv6 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T11:55:50+0000] running db.test.find() in myApp + 
[[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K6ctqMWPWC +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qol25EGGxW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K6ctqMWPWC ++ cat /tmp/tmp.Qol25EGGxW ++ rm /tmp/tmp.K6ctqMWPWC /tmp/tmp.Qol25EGGxW ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NwWLk8KASw ++ mktemp + local LAST_ERR=/tmp/tmp.DoWZ005CDZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NwWLk8KASw + cat /tmp/tmp.DoWZ005CDZ + rm /tmp/tmp.NwWLk8KASw /tmp/tmp.DoWZ005CDZ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.xOEYGWTFTX/find + desc 'CASE 2: Logical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: Logical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical-pitr + run_backup nfs backup-nfs-logical-pitr logical + local storage=nfs + local backup_name=backup-nfs-logical-pitr + local type=logical + log 'running backup backup-nfs-logical-pitr' + set +o xtrace [2026-01-23T11:55:51+0000] running backup backup-nfs-logical-pitr + yq eval '.metadata.name = "backup-nfs-logical-pitr" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.bIrDpmk91Z ++ mktemp + local LAST_ERR=/tmp/tmp.nq9sa8yON7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bIrDpmk91Z perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr created + cat /tmp/tmp.nq9sa8yON7 + rm /tmp/tmp.bIrDpmk91Z /tmp/tmp.nq9sa8yON7 + return 0 + 
wait_backup backup-nfs-logical-pitr + local backup_name=backup-nfs-logical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-logical-pitr to reach ready state.......OK + write_data 100502 -3rd + local x=100502 + local find_prefix=-3rd + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bgDOliQBlW +++ mktemp ++ local LAST_ERR=/tmp/tmp.F5CaP50rbO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bgDOliQBlW ++ cat /tmp/tmp.F5CaP50rbO ++ rm /tmp/tmp.bgDOliQBlW /tmp/tmp.F5CaP50rbO ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.aEEmbQPfm5 ++ mktemp + local LAST_ERR=/tmp/tmp.kBJPcjikiu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aEEmbQPfm5 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8d5c7548-9029-49f9-bcab-4aea181047d8") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.kBJPcjikiu + rm /tmp/tmp.aEEmbQPfm5 /tmp/tmp.kBJPcjikiu + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T11:56:08+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local 
suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M8ttiOa5LB +++ mktemp ++ local LAST_ERR=/tmp/tmp.EK71SvWeQ8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M8ttiOa5LB ++ cat /tmp/tmp.EK71SvWeQ8 ++ rm /tmp/tmp.M8ttiOa5LB /tmp/tmp.EK71SvWeQ8 ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CzCc8KxhWq ++ mktemp + local LAST_ERR=/tmp/tmp.qgCECbjjRT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CzCc8KxhWq + cat /tmp/tmp.qgCECbjjRT + rm /tmp/tmp.CzCc8KxhWq /tmp/tmp.qgCECbjjRT + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.xOEYGWTFTX/find-3rd + run_pitr_check backup-nfs-logical-pitr some-name -3rd + local backup=backup-nfs-logical-pitr + local cluster=some-name + local find_prefix=-3rd + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ONntsm5Z25 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qGNP7tK3O6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ONntsm5Z25 ++ cat /tmp/tmp.qGNP7tK3O6 ++ rm /tmp/tmp.ONntsm5Z25 /tmp/tmp.qGNP7tK3O6 ++ return 0 + local backup_last_write=1769169360 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TismFH7eH4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.C8FHz7TJTH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TismFH7eH4 +++ cat /tmp/tmp.C8FHz7TJTH +++ rm /tmp/tmp.TismFH7eH4 /tmp/tmp.C8FHz7TJTH +++ return 0 ++ echo null + local last_chunk=null + [[ null -gt 1769169360 ]] + [[ 0 -gt 30 ]] ++ 
get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.a6DISWyRfh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.U3osQX0Lst +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.a6DISWyRfh +++ cat /tmp/tmp.U3osQX0Lst +++ rm /tmp/tmp.a6DISWyRfh /tmp/tmp.U3osQX0Lst +++ return 0 ++ echo null + last_chunk=null + retries=1 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769169360 ++ local timestamp=1769169360 +++ TZ=UTC +++ /usr/sbin/date -d@1769169360 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:56:00 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00)' + set +o xtrace [2026-01-23T11:56:15+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00) + sleep 10 + [[ null -gt 1769169360 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0LXf7o3nSt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LSbRFEoz8S +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0LXf7o3nSt +++ cat /tmp/tmp.LSbRFEoz8S +++ rm /tmp/tmp.0LXf7o3nSt /tmp/tmp.LSbRFEoz8S +++ return 0 ++ echo null + last_chunk=null + retries=2 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769169360 ++ local timestamp=1769169360 +++ TZ=UTC +++ /usr/sbin/date -d@1769169360 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:56:00 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00)' + set +o xtrace [2026-01-23T11:56:27+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00) + sleep 10 + [[ null -gt 1769169360 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.P0yBdndP59 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.B1vlfiNpwf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.P0yBdndP59 +++ cat /tmp/tmp.B1vlfiNpwf +++ rm /tmp/tmp.P0yBdndP59 /tmp/tmp.B1vlfiNpwf +++ return 0 ++ echo null + last_chunk=null + retries=3 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769169360 ++ local timestamp=1769169360 +++ TZ=UTC +++ /usr/sbin/date -d@1769169360 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:56:00 + log 'Waiting for last oplog chunk () to be greater than last 
write (2026-01-23 11:56:00)' + set +o xtrace [2026-01-23T11:56:39+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00) + sleep 10 + [[ null -gt 1769169360 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.F00j6Yhzm8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VNAWbuzKEV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.F00j6Yhzm8 +++ cat /tmp/tmp.VNAWbuzKEV +++ rm /tmp/tmp.F00j6Yhzm8 /tmp/tmp.VNAWbuzKEV +++ return 0 ++ echo null + last_chunk=null + retries=4 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769169360 ++ local timestamp=1769169360 +++ TZ=UTC +++ /usr/sbin/date -d@1769169360 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:56:00 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00)' + set +o xtrace [2026-01-23T11:56:51+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00) + sleep 10 + [[ null -gt 1769169360 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QRV3gZszqW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aO0omwwyuX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QRV3gZszqW +++ cat /tmp/tmp.aO0omwwyuX +++ rm /tmp/tmp.QRV3gZszqW /tmp/tmp.aO0omwwyuX +++ return 0 ++ echo null + last_chunk=null + retries=5 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769169360 ++ local timestamp=1769169360 +++ TZ=UTC +++ /usr/sbin/date -d@1769169360 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:56:00 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00)' + set +o xtrace [2026-01-23T11:57:03+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-23 11:56:00) + sleep 10 + [[ null -gt 1769169360 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.G7p38Pk34Y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DE1YQtQSLb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.G7p38Pk34Y +++ cat /tmp/tmp.DE1YQtQSLb +++ rm /tmp/tmp.G7p38Pk34Y /tmp/tmp.DE1YQtQSLb +++ return 0 ++ echo 1769169426 + last_chunk=1769169426 + retries=6 ++ format_date 1769169426 ++ local timestamp=1769169426 +++ TZ=UTC +++ /usr/sbin/date -d@1769169426 '+%Y-%m-%d %H:%M:%S' ++ 
echo 2026-01-23 11:57:06 ++ format_date 1769169360 ++ local timestamp=1769169360 +++ TZ=UTC +++ /usr/sbin/date -d@1769169360 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:56:00 + log 'Waiting for last oplog chunk (2026-01-23 11:57:06) to be greater than last write (2026-01-23 11:56:00)' + set +o xtrace [2026-01-23T11:57:15+0000] Waiting for last oplog chunk (2026-01-23 11:57:06) to be greater than last write (2026-01-23 11:56:00) + sleep 10 + [[ 1769169426 -gt 1769169360 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++++ mktemp ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ local LAST_OUT=/tmp/tmp.3FlV4ffMsb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.10vmt6JtFs ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.3FlV4ffMsb ++++ cat /tmp/tmp.10vmt6JtFs ++++ rm /tmp/tmp.3FlV4ffMsb /tmp/tmp.10vmt6JtFs ++++ return 0 +++ echo 1769169426 ++ format_date 1769169426 ++ local timestamp=1769169426 +++ TZ=UTC +++ /usr/sbin/date -d@1769169426 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:57:06 + local 'target_time=2026-01-23 11:57:06' + log 'dropping test collection' + set +o xtrace [2026-01-23T11:57:27+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.suypKVa0z8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9SiGIEpwDS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.suypKVa0z8 ++ cat /tmp/tmp.9SiGIEpwDS ++ rm /tmp/tmp.suypKVa0z8 /tmp/tmp.9SiGIEpwDS ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.GWIZQ4MhbW ++ mktemp + local LAST_ERR=/tmp/tmp.H7gaNfvNHw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GWIZQ4MhbW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session 
{ "id" : UUID("6961f461-c5ed-475f-aefe-a56c0b644cef") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.H7gaNfvNHw + rm /tmp/tmp.GWIZQ4MhbW /tmp/tmp.H7gaNfvNHw + return 0 + log 'checking pitr... backup: backup-nfs-logical-pitr target: 2026-01-23 11:57:06' + set +o xtrace [2026-01-23T11:57:30+0000] checking pitr... backup: backup-nfs-logical-pitr target: 2026-01-23 11:57:06 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-logical-pitr"' + yq eval '.spec.backupName = "backup-nfs-logical-pitr"' + kubectl_bin apply -f - + yq eval '.spec.pitr.date = "2026-01-23 11:57:06"' ++ mktemp + local LAST_OUT=/tmp/tmp.YMEBKkczpV ++ mktemp + local LAST_ERR=/tmp/tmp.eaza9xQ78B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YMEBKkczpV perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical-pitr created + cat /tmp/tmp.eaza9xQ78B + rm /tmp/tmp.YMEBKkczpV /tmp/tmp.eaza9xQ78B + return 0 + wait_restore backup-nfs-logical-pitr some-name + local backup_name=backup-nfs-logical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical-pitr object to be created.OK Waiting psmdb-restore/restore-backup-nfs-logical-pitr to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.deFIbchPBW +++ mktemp ++ local LAST_ERR=/tmp/tmp.rNT5ykGEOs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.deFIbchPBW ++ cat /tmp/tmp.rNT5ykGEOs ++ rm /tmp/tmp.deFIbchPBW /tmp/tmp.rNT5ykGEOs ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T11:57:58+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for 
set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bgjYvr00dW +++ mktemp ++ local LAST_ERR=/tmp/tmp.CYVQVER5L9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bgjYvr00dW ++ cat /tmp/tmp.CYVQVER5L9 ++ rm /tmp/tmp.bgjYvr00dW /tmp/tmp.CYVQVER5L9 ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.LPnbrYZoP2 ++ mktemp + local LAST_ERR=/tmp/tmp.ezKcBCzPBs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LPnbrYZoP2 + cat /tmp/tmp.ezKcBCzPBs + rm /tmp/tmp.LPnbrYZoP2 /tmp/tmp.ezKcBCzPBs + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.xOEYGWTFTX/find-3rd + desc 'CASE 3: Physical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 3: Physical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical + run_backup nfs backup-nfs-physical physical + local storage=nfs + local backup_name=backup-nfs-physical + local type=physical + log 'running backup backup-nfs-physical' + set +o xtrace [2026-01-23T11:58:01+0000] running backup backup-nfs-physical + yq eval '.metadata.name = "backup-nfs-physical" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RXCwujdLcP ++ mktemp + local LAST_ERR=/tmp/tmp.pKQmCiBFiC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RXCwujdLcP perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical created + cat /tmp/tmp.pKQmCiBFiC + rm /tmp/tmp.RXCwujdLcP /tmp/tmp.pKQmCiBFiC + return 0 + wait_backup backup-nfs-physical + local backup_name=backup-nfs-physical + local target_state=ready + set +o xtrace waiting for backup-nfs-physical to reach ready state.......OK + run_recovery_check backup-nfs-physical some-name -4th -3rd + local backup=backup-nfs-physical + local cluster=some-name + local find_prefix_before=-4th + local find_prefix_after=-3rd + write_data 100501 -4th + local x=100501 + local find_prefix=-4th + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 
driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.29ctHUBtUA +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y0QSpnykdo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.29ctHUBtUA ++ cat /tmp/tmp.Y0QSpnykdo ++ rm /tmp/tmp.29ctHUBtUA /tmp/tmp.Y0QSpnykdo ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gMFSQvfKo3 ++ mktemp + local LAST_ERR=/tmp/tmp.SuRGWr9K5O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gMFSQvfKo3 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f32adace-bc11-420d-bfc3-e2f5719d3242") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.SuRGWr9K5O + rm /tmp/tmp.gMFSQvfKo3 /tmp/tmp.SuRGWr9K5O + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 -4th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix=-4th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T11:58:17+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5kGKBt7IpO +++ mktemp ++ local LAST_ERR=/tmp/tmp.RkuTTqtqOD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5kGKBt7IpO ++ cat /tmp/tmp.RkuTTqtqOD ++ rm /tmp/tmp.5kGKBt7IpO /tmp/tmp.RkuTTqtqOD ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vIycC4gCdV ++ mktemp + local LAST_ERR=/tmp/tmp.nE9dzZkVh7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vIycC4gCdV + cat /tmp/tmp.nE9dzZkVh7 + rm /tmp/tmp.vIycC4gCdV /tmp/tmp.nE9dzZkVh7 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find-4th.json /tmp/tmp.xOEYGWTFTX/find-4th + run_restore backup-nfs-physical + local backup_name=backup-nfs-physical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/restore.yml + log 'running restore restore-backup-nfs-physical' + set +o xtrace [2026-01-23T11:58:20+0000] running restore restore-backup-nfs-physical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-physical/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.0VycTOSFzA ++ mktemp + local LAST_ERR=/tmp/tmp.EMmxlYheko + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0VycTOSFzA perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical created + cat /tmp/tmp.EMmxlYheko + rm /tmp/tmp.0VycTOSFzA /tmp/tmp.EMmxlYheko + return 0 + wait_restore backup-nfs-physical some-name + local backup_name=backup-nfs-physical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical object to be created.OK Waiting psmdb-restore/restore-backup-nfs-physical to reach state "ready" ....OK after 3 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZcTxRAK6Wz +++ mktemp ++ local LAST_ERR=/tmp/tmp.DI4YvLspvF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.ZcTxRAK6Wz ++ cat /tmp/tmp.DI4YvLspvF ++ rm /tmp/tmp.ZcTxRAK6Wz /tmp/tmp.DI4YvLspvF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VBTSjw5kE8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UvU9cGPGKd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VBTSjw5kE8 ++ cat /tmp/tmp.UvU9cGPGKd ++ rm /tmp/tmp.VBTSjw5kE8 /tmp/tmp.UvU9cGPGKd ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lMslZ5FRxP +++ mktemp ++ local LAST_ERR=/tmp/tmp.dfzHljghGj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lMslZ5FRxP ++ cat /tmp/tmp.dfzHljghGj ++ rm /tmp/tmp.lMslZ5FRxP /tmp/tmp.dfzHljghGj ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9ljrJm8joK +++ mktemp ++ local LAST_ERR=/tmp/tmp.5lU3MlfJDc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9ljrJm8joK ++ cat /tmp/tmp.5lU3MlfJDc ++ rm /tmp/tmp.9ljrJm8joK /tmp/tmp.5lU3MlfJDc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g7wznC2NY8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6E7op1GxAC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g7wznC2NY8 ++ cat /tmp/tmp.6E7op1GxAC ++ rm /tmp/tmp.g7wznC2NY8 /tmp/tmp.6E7op1GxAC ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rag08VJJVO +++ mktemp ++ local LAST_ERR=/tmp/tmp.nHzzLml4Ws ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rag08VJJVO ++ cat /tmp/tmp.nHzzLml4Ws ++ rm /tmp/tmp.rag08VJJVO /tmp/tmp.nHzzLml4Ws ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.59o2SWAd8E +++ mktemp ++ local LAST_ERR=/tmp/tmp.liqPl5vsck ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.59o2SWAd8E ++ cat /tmp/tmp.liqPl5vsck ++ rm /tmp/tmp.59o2SWAd8E /tmp/tmp.liqPl5vsck ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wYeDb4cgZn +++ mktemp ++ local LAST_ERR=/tmp/tmp.EC6utTGhNf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wYeDb4cgZn ++ cat /tmp/tmp.EC6utTGhNf ++ rm /tmp/tmp.wYeDb4cgZn /tmp/tmp.EC6utTGhNf ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T12:03:52+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G0mtcNqYtK +++ mktemp ++ local LAST_ERR=/tmp/tmp.YtV6slWsp5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G0mtcNqYtK ++ cat /tmp/tmp.YtV6slWsp5 ++ rm /tmp/tmp.G0mtcNqYtK /tmp/tmp.YtV6slWsp5 ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.eU1e79aCCo ++ mktemp + local LAST_ERR=/tmp/tmp.17NvNsOeHU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eU1e79aCCo + cat /tmp/tmp.17NvNsOeHU + rm /tmp/tmp.eU1e79aCCo /tmp/tmp.17NvNsOeHU + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.xOEYGWTFTX/find-3rd + desc 'CASE 4: Physical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 4: Physical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical-pitr + run_backup nfs backup-nfs-physical-pitr physical + local storage=nfs + local backup_name=backup-nfs-physical-pitr + local type=physical + log 'running backup backup-nfs-physical-pitr' + set +o xtrace [2026-01-23T12:03:55+0000] running backup backup-nfs-physical-pitr + yq eval '.metadata.name = "backup-nfs-physical-pitr" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.EaRTbBOuXG ++ mktemp + local LAST_ERR=/tmp/tmp.KIgR1O48sq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EaRTbBOuXG perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr created + cat /tmp/tmp.KIgR1O48sq + rm /tmp/tmp.EaRTbBOuXG /tmp/tmp.KIgR1O48sq + return 0 + wait_backup backup-nfs-physical-pitr + local backup_name=backup-nfs-physical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-physical-pitr to reach ready state......OK + write_data 100503 -5th + local x=100503 + local find_prefix=-5th + run_mongo 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nRMaaH6nbJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.2tlk22vdQz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nRMaaH6nbJ ++ cat /tmp/tmp.2tlk22vdQz ++ rm /tmp/tmp.nRMaaH6nbJ /tmp/tmp.2tlk22vdQz ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1RyIFVPnfc ++ mktemp + local LAST_ERR=/tmp/tmp.1Nc2ynOZQO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1RyIFVPnfc Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("fff879c4-df48-46d1-8f1d-6565848fee40") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.1Nc2ynOZQO + rm /tmp/tmp.1RyIFVPnfc /tmp/tmp.1Nc2ynOZQO + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T12:04:12+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZAyFcKqENQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7po1YINr2X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZAyFcKqENQ ++ cat /tmp/tmp.7po1YINr2X ++ rm /tmp/tmp.ZAyFcKqENQ /tmp/tmp.7po1YINr2X ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.njEynmQBbm ++ mktemp + local LAST_ERR=/tmp/tmp.cPosg6wynO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.njEynmQBbm + cat /tmp/tmp.cPosg6wynO + rm 
/tmp/tmp.njEynmQBbm /tmp/tmp.cPosg6wynO + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.xOEYGWTFTX/find-5th + run_pitr_check backup-nfs-physical-pitr some-name -5th + local backup=backup-nfs-physical-pitr + local cluster=some-name + local find_prefix=-5th + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gdgiZ6vmAF +++ mktemp ++ local LAST_ERR=/tmp/tmp.iDbu9689FY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gdgiZ6vmAF ++ cat /tmp/tmp.iDbu9689FY ++ rm /tmp/tmp.gdgiZ6vmAF /tmp/tmp.iDbu9689FY ++ return 0 + local backup_last_write=1769169840 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.50FhzQNSjA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kDedzGEcAy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.50FhzQNSjA +++ cat /tmp/tmp.kDedzGEcAy +++ rm /tmp/tmp.50FhzQNSjA /tmp/tmp.kDedzGEcAy +++ return 0 ++ echo 1769169456 + local last_chunk=1769169456 + [[ 1769169456 -gt 1769169840 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VIH3DbbQOq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aStzQv59h0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.VIH3DbbQOq +++ cat /tmp/tmp.aStzQv59h0 +++ rm /tmp/tmp.VIH3DbbQOq /tmp/tmp.aStzQv59h0 +++ return 0 ++ echo 1769169456 + last_chunk=1769169456 + retries=1 ++ format_date 1769169456 ++ local timestamp=1769169456 +++ TZ=UTC +++ /usr/sbin/date -d@1769169456 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:57:36 ++ format_date 1769169840 ++ local timestamp=1769169840 +++ TZ=UTC +++ /usr/sbin/date -d@1769169840 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:04:00 + log 'Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00)' + set +o xtrace [2026-01-23T12:04:20+0000] Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00) + sleep 10 + [[ 1769169456 -gt 1769169840 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aLZLZ7ENS6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZqPB9WrueP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ 
exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aLZLZ7ENS6 +++ cat /tmp/tmp.ZqPB9WrueP +++ rm /tmp/tmp.aLZLZ7ENS6 /tmp/tmp.ZqPB9WrueP +++ return 0 ++ echo 1769169456 + last_chunk=1769169456 + retries=2 ++ format_date 1769169456 ++ local timestamp=1769169456 +++ TZ=UTC +++ /usr/sbin/date -d@1769169456 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:57:36 ++ format_date 1769169840 ++ local timestamp=1769169840 +++ TZ=UTC +++ /usr/sbin/date -d@1769169840 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:04:00 + log 'Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00)' + set +o xtrace [2026-01-23T12:04:32+0000] Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00) + sleep 10 + [[ 1769169456 -gt 1769169840 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.j01zIzbAUL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pDw9IL5lgS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.j01zIzbAUL +++ cat /tmp/tmp.pDw9IL5lgS +++ rm /tmp/tmp.j01zIzbAUL /tmp/tmp.pDw9IL5lgS +++ return 0 ++ echo 1769169456 + last_chunk=1769169456 + retries=3 ++ format_date 1769169456 ++ local timestamp=1769169456 +++ TZ=UTC +++ /usr/sbin/date -d@1769169456 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:57:36 ++ format_date 1769169840 ++ local timestamp=1769169840 +++ TZ=UTC +++ /usr/sbin/date -d@1769169840 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:04:00 + log 'Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00)' + set +o xtrace [2026-01-23T12:04:44+0000] Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00) + sleep 10 + [[ 1769169456 -gt 1769169840 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7KUlGdwBA0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lvQV5kl9CD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7KUlGdwBA0 +++ cat /tmp/tmp.lvQV5kl9CD +++ rm /tmp/tmp.7KUlGdwBA0 /tmp/tmp.lvQV5kl9CD +++ return 0 ++ echo 1769169456 + last_chunk=1769169456 + retries=4 ++ format_date 1769169456 ++ local timestamp=1769169456 +++ TZ=UTC +++ /usr/sbin/date -d@1769169456 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:57:36 ++ format_date 1769169840 ++ local timestamp=1769169840 +++ TZ=UTC +++ /usr/sbin/date -d@1769169840 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:04:00 + log 'Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00)' + set +o xtrace [2026-01-23T12:04:56+0000] Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00) + sleep 10 + [[ 1769169456 -gt 1769169840 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local 
cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FQnNHOMswM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.v8FIpQ9o1r +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FQnNHOMswM +++ cat /tmp/tmp.v8FIpQ9o1r +++ rm /tmp/tmp.FQnNHOMswM /tmp/tmp.v8FIpQ9o1r +++ return 0 ++ echo 1769169456 + last_chunk=1769169456 + retries=5 ++ format_date 1769169456 ++ local timestamp=1769169456 +++ TZ=UTC +++ /usr/sbin/date -d@1769169456 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:57:36 ++ format_date 1769169840 ++ local timestamp=1769169840 +++ TZ=UTC +++ /usr/sbin/date -d@1769169840 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:04:00 + log 'Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00)' + set +o xtrace [2026-01-23T12:05:07+0000] Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00) + sleep 10 + [[ 1769169456 -gt 1769169840 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fFM3mR7UT6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7RZUHevCLv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fFM3mR7UT6 +++ cat /tmp/tmp.7RZUHevCLv +++ rm /tmp/tmp.fFM3mR7UT6 /tmp/tmp.7RZUHevCLv +++ return 0 ++ echo 1769169456 + last_chunk=1769169456 + retries=6 ++ format_date 1769169456 ++ local timestamp=1769169456 +++ TZ=UTC +++ /usr/sbin/date -d@1769169456 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 11:57:36 ++ format_date 1769169840 ++ local timestamp=1769169840 +++ TZ=UTC +++ /usr/sbin/date -d@1769169840 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:04:00 + log 'Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00)' + set +o xtrace [2026-01-23T12:05:19+0000] Waiting for last oplog chunk (2026-01-23 11:57:36) to be greater than last write (2026-01-23 12:04:00) + sleep 10 + [[ 1769169456 -gt 1769169840 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IgREcrHKiO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lBr5qSWfJV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IgREcrHKiO +++ cat /tmp/tmp.lBr5qSWfJV +++ rm /tmp/tmp.IgREcrHKiO /tmp/tmp.lBr5qSWfJV +++ return 0 ++ echo 1769169925 + last_chunk=1769169925 + retries=7 ++ format_date 1769169925 ++ local timestamp=1769169925 +++ TZ=UTC +++ /usr/sbin/date -d@1769169925 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:05:25 ++ format_date 1769169840 ++ local timestamp=1769169840 +++ TZ=UTC +++ /usr/sbin/date -d@1769169840 
'+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:04:00 + log 'Waiting for last oplog chunk (2026-01-23 12:05:25) to be greater than last write (2026-01-23 12:04:00)' + set +o xtrace [2026-01-23T12:05:32+0000] Waiting for last oplog chunk (2026-01-23 12:05:25) to be greater than last write (2026-01-23 12:04:00) + sleep 10 + [[ 1769169925 -gt 1769169840 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qjS7bRwqhO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.OiqZVBbqM1 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.qjS7bRwqhO ++++ cat /tmp/tmp.OiqZVBbqM1 ++++ rm /tmp/tmp.qjS7bRwqhO /tmp/tmp.OiqZVBbqM1 ++++ return 0 +++ echo 1769169925 ++ format_date 1769169925 ++ local timestamp=1769169925 +++ TZ=UTC +++ /usr/sbin/date -d@1769169925 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-23 12:05:25 + local 'target_time=2026-01-23 12:05:25' + log 'dropping test collection' + set +o xtrace [2026-01-23T12:05:44+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L0FiFTJwWg +++ mktemp ++ local LAST_ERR=/tmp/tmp.2pXzZ1ALRZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L0FiFTJwWg ++ cat /tmp/tmp.2pXzZ1ALRZ ++ rm /tmp/tmp.L0FiFTJwWg /tmp/tmp.2pXzZ1ALRZ ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0Wwisq3jpd ++ mktemp + local LAST_ERR=/tmp/tmp.6pWc39VfRO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0Wwisq3jpd Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-16203.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("73d532ed-9307-4d64-b773-982e0b4099f3") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell 
and server versions do not match switched to db myApp true bye + cat /tmp/tmp.6pWc39VfRO + rm /tmp/tmp.0Wwisq3jpd /tmp/tmp.6pWc39VfRO + return 0 + log 'checking pitr... backup: backup-nfs-physical-pitr target: 2026-01-23 12:05:25' + set +o xtrace [2026-01-23T12:05:47+0000] checking pitr... backup: backup-nfs-physical-pitr target: 2026-01-23 12:05:25 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-physical-pitr"' + yq eval '.spec.backupName = "backup-nfs-physical-pitr"' + yq eval '.spec.pitr.date = "2026-01-23 12:05:25"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.f0ufdEDz0U ++ mktemp + local LAST_ERR=/tmp/tmp.RjEEX5wruk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f0ufdEDz0U perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical-pitr created + cat /tmp/tmp.RjEEX5wruk + rm /tmp/tmp.f0ufdEDz0U /tmp/tmp.RjEEX5wruk + return 0 + wait_restore backup-nfs-physical-pitr some-name + local backup_name=backup-nfs-physical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical-pitr object to be created.OK Waiting psmdb-restore/restore-backup-nfs-physical-pitr to reach state "ready" ......OK after 5 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PIsVf5gQcD +++ mktemp ++ local LAST_ERR=/tmp/tmp.1G6QyDjXar ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PIsVf5gQcD ++ cat /tmp/tmp.1G6QyDjXar ++ rm /tmp/tmp.PIsVf5gQcD /tmp/tmp.1G6QyDjXar ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JKlv6h4GpD +++ mktemp ++ local LAST_ERR=/tmp/tmp.WmZRKytt5l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JKlv6h4GpD ++ cat /tmp/tmp.WmZRKytt5l ++ rm /tmp/tmp.JKlv6h4GpD /tmp/tmp.WmZRKytt5l ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0umI1mU7ph +++ mktemp ++ local LAST_ERR=/tmp/tmp.RgGmFgSBrg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0umI1mU7ph ++ cat /tmp/tmp.RgGmFgSBrg ++ rm /tmp/tmp.0umI1mU7ph /tmp/tmp.RgGmFgSBrg ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OAvxTM1BVi +++ mktemp ++ local LAST_ERR=/tmp/tmp.g0kWo07yIg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OAvxTM1BVi ++ cat /tmp/tmp.g0kWo07yIg ++ rm /tmp/tmp.OAvxTM1BVi /tmp/tmp.g0kWo07yIg ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BPAgd6Iu1O +++ mktemp ++ local LAST_ERR=/tmp/tmp.GqBOp8vECH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BPAgd6Iu1O ++ cat /tmp/tmp.GqBOp8vECH ++ rm /tmp/tmp.BPAgd6Iu1O /tmp/tmp.GqBOp8vECH ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ghswgq7vkE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Amyao4DTwW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ghswgq7vkE ++ cat /tmp/tmp.Amyao4DTwW ++ rm /tmp/tmp.ghswgq7vkE /tmp/tmp.Amyao4DTwW ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MoYtyZ7vkJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.TngjbzTOop ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MoYtyZ7vkJ ++ cat /tmp/tmp.TngjbzTOop ++ rm /tmp/tmp.MoYtyZ7vkJ /tmp/tmp.TngjbzTOop ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C8TEoviutk +++ mktemp ++ local LAST_ERR=/tmp/tmp.m4ndvG418y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C8TEoviutk ++ cat /tmp/tmp.m4ndvG418y ++ rm /tmp/tmp.C8TEoviutk /tmp/tmp.m4ndvG418y ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.focdtsvZLr +++ mktemp ++ local LAST_ERR=/tmp/tmp.LS9uP4H991 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.focdtsvZLr ++ cat /tmp/tmp.LS9uP4H991 ++ rm /tmp/tmp.focdtsvZLr /tmp/tmp.LS9uP4H991 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4pGvWA88Zv +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gxg1OnEauL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4pGvWA88Zv ++ cat /tmp/tmp.Gxg1OnEauL ++ rm /tmp/tmp.4pGvWA88Zv /tmp/tmp.Gxg1OnEauL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OHMF3exv08 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gCjb3dOgCr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OHMF3exv08 ++ cat /tmp/tmp.gCjb3dOgCr ++ rm /tmp/tmp.OHMF3exv08 /tmp/tmp.gCjb3dOgCr ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-16203 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-23T12:12:59+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-16203 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-16203 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-16203 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VFYyL1fuGH +++ mktemp ++ local LAST_ERR=/tmp/tmp.ljyd1KPMkw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VFYyL1fuGH ++ cat /tmp/tmp.ljyd1KPMkw ++ rm /tmp/tmp.VFYyL1fuGH /tmp/tmp.ljyd1KPMkw ++ return 0 + local client_container=psmdb-client-696897d69b-ww6c5 + kubectl_bin exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Oe2EhUjqZO ++ mktemp + local LAST_ERR=/tmp/tmp.StWxUXaqbM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-ww6c5 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-16203.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Oe2EhUjqZO + cat /tmp/tmp.StWxUXaqbM + rm /tmp/tmp.Oe2EhUjqZO /tmp/tmp.StWxUXaqbM + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.xOEYGWTFTX/find-5th + destroy demand-backup-fs-16203 + local namespace=demand-backup-fs-16203 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.17UIqUrSl9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OF8ESlm3sE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.17UIqUrSl9 ++ cat /tmp/tmp.OF8ESlm3sE ++ rm /tmp/tmp.17UIqUrSl9 /tmp/tmp.OF8ESlm3sE ++ return 0 + '[' 4 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.fsoSMTtkBt ++ mktemp + local LAST_ERR=/tmp/tmp.N9GfD4162T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fsoSMTtkBt NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-nfs-logical some-name nfs /mnt/nfs/2026-01-23T11:55:11Z logical 38.32KB ready 17m 17m backup-nfs-logical-pitr some-name nfs /mnt/nfs/2026-01-23T11:55:54Z logical 42.24KB ready 17m 17m backup-nfs-physical some-name nfs /mnt/nfs/2026-01-23T11:58:03Z physical 1.36MB ready 14m 15m backup-nfs-physical-pitr some-name nfs /mnt/nfs/2026-01-23T12:03:58Z physical 874.09KB ready 8m58s 9m6s + cat /tmp/tmp.N9GfD4162T + rm /tmp/tmp.fsoSMTtkBt /tmp/tmp.N9GfD4162T + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.9rhHjc9t0D ++ mktemp + local LAST_ERR=/tmp/tmp.nHdfEXSb27 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9rhHjc9t0D perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical" deleted from demand-backup-fs-16203 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical-pitr" deleted from demand-backup-fs-16203 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical" deleted from demand-backup-fs-16203 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical-pitr" deleted from demand-backup-fs-16203 namespace + cat /tmp/tmp.nHdfEXSb27 + rm /tmp/tmp.9rhHjc9t0D /tmp/tmp.nHdfEXSb27 + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC 
----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.IqjDhli5Zw ++ mktemp + local LAST_ERR=/tmp/tmp.RJVq3LD4SU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IqjDhli5Zw customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.RJVq3LD4SU + rm /tmp/tmp.IqjDhli5Zw /tmp/tmp.RJVq3LD4SU + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/crd.yaml grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jIJU3tlmCJ ++ mktemp + local LAST_ERR=/tmp/tmp.GUeKnlp5Uf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jIJU3tlmCJ + cat /tmp/tmp.GUeKnlp5Uf + rm /tmp/tmp.jIJU3tlmCJ /tmp/tmp.GUeKnlp5Uf + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.zVGzj7RLaL ++ mktemp + local LAST_ERR=/tmp/tmp.cq4WNZhkMt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zVGzj7RLaL + cat /tmp/tmp.cq4WNZhkMt + rm /tmp/tmp.zVGzj7RLaL /tmp/tmp.cq4WNZhkMt + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com 
--all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-16203 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.VzoEWeWQAq ++ mktemp + local LAST_ERR=/tmp/tmp.ID5XkdpOHi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VzoEWeWQAq customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.ID5XkdpOHi + rm /tmp/tmp.VzoEWeWQAq /tmp/tmp.ID5XkdpOHi + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.PupmpexSK8 ++ mktemp + local LAST_ERR=/tmp/tmp.hC8Xm0FIIb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2213/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PupmpexSK8 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.hC8Xm0FIIb + rm /tmp/tmp.PupmpexSK8 /tmp/tmp.hC8Xm0FIIb + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.eZaMfIRm5t ++ mktemp + local LAST_ERR=/tmp/tmp.UL05yzkCgv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eZaMfIRm5t + cat /tmp/tmp.UL05yzkCgv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eZaMfIRm5t + cat /tmp/tmp.UL05yzkCgv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eZaMfIRm5t + cat /tmp/tmp.UL05yzkCgv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): 
error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not 
found + sleep 8 + cat /tmp/tmp.eZaMfIRm5t + cat /tmp/tmp.UL05yzkCgv Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.eZaMfIRm5t /tmp/tmp.UL05yzkCgv + return 1 + true + '[' -n '' ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-fs-16203 + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + rm -rf /tmp/tmp.xOEYGWTFTX ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.RY0AG2FJGu ++ mktemp + local LAST_OUT=/tmp/tmp.sZrUBJtCRH + local LAST_ERR=/tmp/tmp.SzKibnRZhc + local exit_status=0 + local timeout=4 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + local LAST_ERR=/tmp/tmp.exASuPMODJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-fs-16203