Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/logs/demand-backup-fs.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1 + cluster=some-name + create_infra demand-backup-fs-8637 + local ns=demand-backup-fs-8637 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.juihJvAlN4 ++ mktemp + local LAST_ERR=/tmp/tmp.Bg4BUVmPCK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.juihJvAlN4 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.Bg4BUVmPCK + rm /tmp/tmp.juihJvAlN4 /tmp/tmp.Bg4BUVmPCK + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-17523 backup-nfs-logical --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-17523 backup-nfs-logical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-17523 backup-nfs-physical --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-17523 backup-nfs-physical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.BtHgkQZ2eJ ++ mktemp + local LAST_ERR=/tmp/tmp.knw8f3gTjD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + 
set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BtHgkQZ2eJ customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.knw8f3gTjD + rm /tmp/tmp.BtHgkQZ2eJ /tmp/tmp.knw8f3gTjD + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.npEuDxhV8p ++ mktemp + local LAST_ERR=/tmp/tmp.75I9aH51gn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.npEuDxhV8p + cat /tmp/tmp.75I9aH51gn + rm /tmp/tmp.npEuDxhV8p /tmp/tmp.75I9aH51gn + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-17523 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.coNlwcUcOp ++ mktemp + local LAST_ERR=/tmp/tmp.DseyvAdwxD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.coNlwcUcOp customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.DseyvAdwxD + rm /tmp/tmp.coNlwcUcOp /tmp/tmp.DseyvAdwxD + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.hC2uyD2MgM ++ mktemp + local LAST_ERR=/tmp/tmp.fJWCGp9RlS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hC2uyD2MgM clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.fJWCGp9RlS + rm /tmp/tmp.hC2uyD2MgM /tmp/tmp.fJWCGp9RlS + return 0 + check_crd_for_deletion PR-2044-1a2e24e2 + local git_tag=PR-2044-1a2e24e2 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2044-1a2e24e2/deploy/crd.yaml ++ yq eval 
.metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N1PCzrtPbB +++ mktemp ++ local LAST_ERR=/tmp/tmp.6gk60lpROz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.N1PCzrtPbB ++ cat /tmp/tmp.6gk60lpROz Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.N1PCzrtPbB ++ cat /tmp/tmp.6gk60lpROz Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.N1PCzrtPbB ++ cat /tmp/tmp.6gk60lpROz Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.N1PCzrtPbB ++ cat /tmp/tmp.6gk60lpROz Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.N1PCzrtPbB /tmp/tmp.6gk60lpROz ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace + awk '{print$1}' + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.xZckFZpJ0Y egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.nz7fz9H9OB + local LAST_ERR=/tmp/tmp.VQr7cX7RPP + local exit_status=0 ++ mktemp + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.f53mkxGhmr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xZckFZpJ0Y + cat /tmp/tmp.VQr7cX7RPP + rm /tmp/tmp.xZckFZpJ0Y /tmp/tmp.VQr7cX7RPP + return 0 namespace "demand-backup-fs-17523" deleted namespace "storage" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nz7fz9H9OB namespace "psmdb-operator" deleted + cat /tmp/tmp.f53mkxGhmr + rm /tmp/tmp.nz7fz9H9OB /tmp/tmp.f53mkxGhmr + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.CkKNUfoXCm ++ mktemp + local LAST_ERR=/tmp/tmp.7i1o10FH9q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CkKNUfoXCm + cat /tmp/tmp.7i1o10FH9q + rm /tmp/tmp.CkKNUfoXCm /tmp/tmp.7i1o10FH9q + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.GzkHALFTOc ++ mktemp + local LAST_ERR=/tmp/tmp.TGjp5zxEAN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GzkHALFTOc namespace/psmdb-operator created + cat /tmp/tmp.TGjp5zxEAN + rm /tmp/tmp.GzkHALFTOc /tmp/tmp.TGjp5zxEAN + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.WPKg5pRAyR +++ mktemp ++ local LAST_ERR=/tmp/tmp.2pEbS9rzyH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WPKg5pRAyR ++ cat /tmp/tmp.2pEbS9rzyH ++ rm /tmp/tmp.WPKg5pRAyR /tmp/tmp.2pEbS9rzyH ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-1a2e24e2-3-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.C5aO53B1NK ++ mktemp + local LAST_ERR=/tmp/tmp.wFSZmnur5H + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-1a2e24e2-3-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C5aO53B1NK Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-1a2e24e2-3-cluster4" modified. + cat /tmp/tmp.wFSZmnur5H + rm /tmp/tmp.C5aO53B1NK /tmp/tmp.wFSZmnur5H + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.HJxKpCPHpQ ++ mktemp + local LAST_ERR=/tmp/tmp.4S8ehxzYHw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HJxKpCPHpQ customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.4S8ehxzYHw + rm /tmp/tmp.HJxKpCPHpQ /tmp/tmp.4S8ehxzYHw + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.iutKX2txe9 ++ mktemp + local LAST_ERR=/tmp/tmp.ixZCU2PVCE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iutKX2txe9 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.ixZCU2PVCE + rm /tmp/tmp.iutKX2txe9 /tmp/tmp.ixZCU2PVCE + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2044-1a2e24e2") | + kubectl_bin apply -f - ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.aOTzMSbEr4 ++ mktemp + local LAST_ERR=/tmp/tmp.gBFgVUayxf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aOTzMSbEr4 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.gBFgVUayxf + rm /tmp/tmp.aOTzMSbEr4 /tmp/tmp.gBFgVUayxf + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.bLFjUt80sY +++ mktemp ++ local LAST_ERR=/tmp/tmp.ibamQvMB9E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bLFjUt80sY ++ cat /tmp/tmp.ibamQvMB9E ++ rm /tmp/tmp.bLFjUt80sY /tmp/tmp.ibamQvMB9E ++ return 0 + wait_pod percona-server-mongodb-operator-7cb95d4f6d-wjhzp + local pod=percona-server-mongodb-operator-7cb95d4f6d-wjhzp + set +o xtrace waiting for pod/percona-server-mongodb-operator-7cb95d4f6d-wjhzp to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tz9u48wczQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.MtXugXU9o4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tz9u48wczQ ++ cat /tmp/tmp.MtXugXU9o4 ++ rm /tmp/tmp.Tz9u48wczQ /tmp/tmp.MtXugXU9o4 ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-7cb95d4f6d-wjhzp ++ mktemp + local LAST_OUT=/tmp/tmp.8rXd7YFi4y ++ mktemp + local LAST_ERR=/tmp/tmp.r6gRPmrqOT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-7cb95d4f6d-wjhzp + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8rXd7YFi4y + cat /tmp/tmp.r6gRPmrqOT + rm /tmp/tmp.8rXd7YFi4y /tmp/tmp.r6gRPmrqOT + return 0 2025-09-14T09:50:41.298Z INFO setup Manager starting up {"gitCommit": "1a2e24e21a69585ac8fcb6108c10381b376579c9", "gitBranch": "PR-2044-1a2e24e2", "buildTime": "", "goVersion": "go1.24.7", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-fs-8637 + local namespace=demand-backup-fs-8637 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but 
no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + awk '{print$1}' + desc 'cleaned up old namespaces demand-backup-fs-8637' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-fs-8637 ----------------------------------------------------------------------------------- + xargs kubectl delete ns + kubectl_bin delete namespace demand-backup-fs-8637 --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.9UQFEEPxs3 egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.qwL9YzKasa ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.dFZkZWsI08 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.6UKWmgWye4 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-fs-8637 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9UQFEEPxs3 + cat /tmp/tmp.dFZkZWsI08 + rm /tmp/tmp.9UQFEEPxs3 /tmp/tmp.dFZkZWsI08 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qwL9YzKasa + cat /tmp/tmp.6UKWmgWye4 + rm /tmp/tmp.qwL9YzKasa /tmp/tmp.6UKWmgWye4 + return 0 + kubectl_bin wait --for=delete namespace demand-backup-fs-8637 ++ mktemp + local LAST_OUT=/tmp/tmp.hrlYwTmsfS ++ mktemp + local LAST_ERR=/tmp/tmp.YjJH3Acp6g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-fs-8637 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hrlYwTmsfS + cat /tmp/tmp.YjJH3Acp6g + rm /tmp/tmp.hrlYwTmsfS /tmp/tmp.YjJH3Acp6g + return 0 + desc 'create namespace demand-backup-fs-8637' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-fs-8637 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-fs-8637 ++ mktemp + local LAST_OUT=/tmp/tmp.sWM2GRfjRX ++ mktemp + local 
LAST_ERR=/tmp/tmp.6hoipp3l5w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-fs-8637 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sWM2GRfjRX namespace/demand-backup-fs-8637 created + cat /tmp/tmp.6hoipp3l5w + rm /tmp/tmp.sWM2GRfjRX /tmp/tmp.6hoipp3l5w + return 0 + set_kube_ctx demand-backup-fs-8637 + local namespace=demand-backup-fs-8637 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.RhO64TJehE +++ mktemp ++ local LAST_ERR=/tmp/tmp.fPb5WJHZEj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RhO64TJehE ++ cat /tmp/tmp.fPb5WJHZEj ++ rm /tmp/tmp.RhO64TJehE /tmp/tmp.fPb5WJHZEj ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-1a2e24e2-3-cluster4 --namespace=demand-backup-fs-8637 ++ mktemp + local LAST_OUT=/tmp/tmp.c2WArhYIeZ ++ mktemp + local LAST_ERR=/tmp/tmp.LSSFOmVuOK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-1a2e24e2-3-cluster4 --namespace=demand-backup-fs-8637 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c2WArhYIeZ Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-1a2e24e2-3-cluster4" modified. + cat /tmp/tmp.LSSFOmVuOK + rm /tmp/tmp.c2WArhYIeZ /tmp/tmp.LSSFOmVuOK + return 0 + kubectl_bin delete ns storage ++ mktemp + local LAST_OUT=/tmp/tmp.r5lZyvEoxN ++ mktemp + local LAST_ERR=/tmp/tmp.0ekYBQwaqt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.r5lZyvEoxN + cat /tmp/tmp.0ekYBQwaqt Error from server (NotFound): namespaces "storage" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.r5lZyvEoxN + cat /tmp/tmp.0ekYBQwaqt Error from server (NotFound): namespaces "storage" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.r5lZyvEoxN + cat /tmp/tmp.0ekYBQwaqt Error from server (NotFound): namespaces "storage" not found + sleep 8 + cat /tmp/tmp.r5lZyvEoxN + cat /tmp/tmp.0ekYBQwaqt Error from server (NotFound): namespaces "storage" not found + rm /tmp/tmp.r5lZyvEoxN /tmp/tmp.0ekYBQwaqt + return 1 + : + [[ 1 != 1 ]] + uid=1001 + [[ -n '' ]] + log 'deploying NFS server' + set +o xtrace [2025-09-14T09:51:08+0000] deploying NFS server + deploy_nfs_server 1001 + local uid=1001 + kubectl_bin create namespace storage ++ mktemp + local LAST_OUT=/tmp/tmp.pR7LbtVZ4Y ++ mktemp + local LAST_ERR=/tmp/tmp.esyFOWSE1P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace storage + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pR7LbtVZ4Y namespace/storage created + cat /tmp/tmp.esyFOWSE1P + rm /tmp/tmp.pR7LbtVZ4Y /tmp/tmp.esyFOWSE1P + return 0 + kubectl_bin apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/nfs-server.yml ++ mktemp + local LAST_OUT=/tmp/tmp.yK9I1uyqov ++ mktemp + local LAST_ERR=/tmp/tmp.lXSulvbWzi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + 
set +e + kubectl apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/nfs-server.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yK9I1uyqov serviceaccount/nfs-server created rolebinding.rbac.authorization.k8s.io/system:openshift:scc:privileged created persistentvolumeclaim/nfs-pvc created deployment.apps/nfs-server created service/nfs-service created + cat /tmp/tmp.lXSulvbWzi + rm /tmp/tmp.yK9I1uyqov /tmp/tmp.lXSulvbWzi + return 0 + sleep 5 ++ kubectl_bin get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ERQjZSwKV2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jEAIsppX9B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ERQjZSwKV2 ++ cat /tmp/tmp.jEAIsppX9B ++ rm /tmp/tmp.ERQjZSwKV2 /tmp/tmp.jEAIsppX9B ++ return 0 + local nfsPod=nfs-server-b799cbd49-2qd5g ++ kubectl_bin get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C6xYOIROR9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MYKqLlatEt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C6xYOIROR9 ++ cat /tmp/tmp.MYKqLlatEt ++ rm /tmp/tmp.C6xYOIROR9 /tmp/tmp.MYKqLlatEt ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-b799cbd49-2qd5g to start Running' + set +o xtrace [2025-09-14T09:51:20+0000] Waiting for nfs-server-b799cbd49-2qd5g to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XO3Q1H3Laf +++ mktemp ++ local LAST_ERR=/tmp/tmp.xHBPid0zTx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XO3Q1H3Laf ++ cat /tmp/tmp.xHBPid0zTx ++ rm /tmp/tmp.XO3Q1H3Laf /tmp/tmp.xHBPid0zTx ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-b799cbd49-2qd5g to start Running' + set +o xtrace [2025-09-14T09:51:22+0000] Waiting for nfs-server-b799cbd49-2qd5g to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WRknRtZmio +++ mktemp ++ local LAST_ERR=/tmp/tmp.rtXYgEGNbg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WRknRtZmio ++ cat /tmp/tmp.rtXYgEGNbg ++ rm /tmp/tmp.WRknRtZmio /tmp/tmp.rtXYgEGNbg ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-b799cbd49-2qd5g to start Running' + set +o xtrace [2025-09-14T09:51:24+0000] Waiting for nfs-server-b799cbd49-2qd5g to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PCmxtZw0wi +++ mktemp ++ local LAST_ERR=/tmp/tmp.FUZZ6wVGtx ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PCmxtZw0wi ++ cat /tmp/tmp.FUZZ6wVGtx ++ rm /tmp/tmp.PCmxtZw0wi /tmp/tmp.FUZZ6wVGtx ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-b799cbd49-2qd5g to start Running' + set +o xtrace [2025-09-14T09:51:25+0000] Waiting for nfs-server-b799cbd49-2qd5g to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SVRRZE6SjW +++ mktemp ++ local LAST_ERR=/tmp/tmp.G2Zwct89hB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-b799cbd49-2qd5g -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SVRRZE6SjW ++ cat /tmp/tmp.G2Zwct89hB ++ rm /tmp/tmp.SVRRZE6SjW /tmp/tmp.G2Zwct89hB ++ return 0 + [[ Running == \R\u\n\n\i\n\g ]] + kubectl_bin exec -n storage nfs-server-b799cbd49-2qd5g -- mkdir /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.8icpbpZLkH ++ mktemp + local LAST_ERR=/tmp/tmp.2XICy5k4d7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-b799cbd49-2qd5g -- mkdir /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8icpbpZLkH + cat /tmp/tmp.2XICy5k4d7 + rm /tmp/tmp.8icpbpZLkH /tmp/tmp.2XICy5k4d7 + return 0 + kubectl_bin exec -n storage nfs-server-b799cbd49-2qd5g -- chown 1001:1001 /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.VN0GS8P8yN ++ mktemp + local LAST_ERR=/tmp/tmp.QbvVCMg7B9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-b799cbd49-2qd5g -- chown 1001:1001 /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VN0GS8P8yN + cat /tmp/tmp.QbvVCMg7B9 + rm /tmp/tmp.VN0GS8P8yN /tmp/tmp.QbvVCMg7B9 + return 0 + log 'creating secrets and start client' + set +o xtrace [2025-09-14T09:51:31+0000] creating secrets and start client + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.XGrYBtFEaa ++ mktemp + local LAST_ERR=/tmp/tmp.UYHGD9jzYa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XGrYBtFEaa secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.UYHGD9jzYa + rm /tmp/tmp.XGrYBtFEaa /tmp/tmp.UYHGD9jzYa + return 0 + [[ -n '' ]] + log 'creating PSMDB cluster some-name' + set +o xtrace [2025-09-14T09:51:33+0000] creating PSMDB cluster some-name + [[ 1 != 1 ]] + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/some-name.yml + kubectl_bin apply -f - + cat 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2044-1a2e24e2"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.gyaGfZ5oQC ++ mktemp + local LAST_ERR=/tmp/tmp.NyXOg37T30 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gyaGfZ5oQC perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.NyXOg37T30 + rm /tmp/tmp.gyaGfZ5oQC /tmp/tmp.NyXOg37T30 + return 0 + log 'wait for all 3 pods to start' + set +o xtrace [2025-09-14T09:51:35+0000] wait for all 3 pods to start + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EPj7c9RrMS +++ mktemp ++ local LAST_ERR=/tmp/tmp.dEQZRw4psN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EPj7c9RrMS ++ cat /tmp/tmp.dEQZRw4psN ++ rm /tmp/tmp.EPj7c9RrMS /tmp/tmp.dEQZRw4psN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IEDA03uBeR +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nkrx9TzaHH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IEDA03uBeR ++ cat /tmp/tmp.Nkrx9TzaHH ++ rm /tmp/tmp.IEDA03uBeR /tmp/tmp.Nkrx9TzaHH ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yTivaW9AmQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.kV0uJpSGft ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yTivaW9AmQ ++ cat /tmp/tmp.kV0uJpSGft ++ rm /tmp/tmp.yTivaW9AmQ /tmp/tmp.kV0uJpSGft ++ 
return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.... + [[ 1 == 1 ]] + log 'checking if statefulset created with expected config' + set +o xtrace [2025-09-14T09:53:10+0000] checking if statefulset created with expected config + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.U3WlJTolQa/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-fs-8637", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.82rgegND9Z ++ mktemp + local LAST_ERR=/tmp/tmp.9uphGpg1kl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.82rgegND9Z + cat /tmp/tmp.9uphGpg1kl + rm /tmp/tmp.82rgegND9Z /tmp/tmp.9uphGpg1kl + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.U3WlJTolQa/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.U3WlJTolQa/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.U3WlJTolQa/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml /tmp/tmp.U3WlJTolQa/statefulset_some-name-rs0.yml + log 'creating user' + set +o xtrace [2025-09-14T09:53:11+0000] creating user + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-8637 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3lY6j3JSzs +++ mktemp ++ local LAST_ERR=/tmp/tmp.cJKC5XxmvI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3lY6j3JSzs ++ cat /tmp/tmp.cJKC5XxmvI ++ rm /tmp/tmp.3lY6j3JSzs /tmp/tmp.cJKC5XxmvI ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.p5OPTPFFkS ++ mktemp + local LAST_ERR=/tmp/tmp.TrEWtYw86G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p5OPTPFFkS Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f2bec103-e01f-44a2-9d9c-0e06f373512e") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.TrEWtYw86G + rm /tmp/tmp.p5OPTPFFkS /tmp/tmp.TrEWtYw86G + return 0 + sleep 2 + log 'write initial data' + set +o xtrace [2025-09-14T09:53:15+0000] write initial data + write_data 100500 '' + local x=100500 + local find_prefix= + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V4A2b0MeBU +++ mktemp ++ local LAST_ERR=/tmp/tmp.5gONbDBOdC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V4A2b0MeBU ++ cat /tmp/tmp.5gONbDBOdC ++ rm /tmp/tmp.V4A2b0MeBU /tmp/tmp.5gONbDBOdC ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QFHeTHS3To ++ mktemp + local LAST_ERR=/tmp/tmp.hcua5Ln2fR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QFHeTHS3To Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0c18fc39-56d2-4edf-ba2d-22000fe6a73d") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.hcua5Ln2fR + rm /tmp/tmp.QFHeTHS3To /tmp/tmp.hcua5Ln2fR + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local 
collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T09:53:18+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.RzjMNwVj06 +++ mktemp ++ local LAST_ERR=/tmp/tmp.95dOTLb5ge ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RzjMNwVj06 ++ cat /tmp/tmp.95dOTLb5ge ++ rm /tmp/tmp.RzjMNwVj06 /tmp/tmp.95dOTLb5ge ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.U0merCOI6Y ++ mktemp + local LAST_ERR=/tmp/tmp.72gZyz7pwG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U0merCOI6Y + cat /tmp/tmp.72gZyz7pwG + rm /tmp/tmp.U0merCOI6Y /tmp/tmp.72gZyz7pwG + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.U3WlJTolQa/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2025-09-14T09:53:03.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2025-09-14T09:53:09.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2025-09-14T09:53:10.000+0000 I listening for the commands + desc 'CASE 1: Logical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: Logical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical + run_backup nfs backup-nfs-logical logical + local storage=nfs + local 
backup_name=backup-nfs-logical + local type=logical + desc 'run backup backup-nfs-logical' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-nfs-logical ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-nfs-logical" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.TG8WyIfu8B ++ mktemp + local LAST_ERR=/tmp/tmp.inQeK7T4uA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TG8WyIfu8B perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical created + cat /tmp/tmp.inQeK7T4uA + rm /tmp/tmp.TG8WyIfu8B /tmp/tmp.inQeK7T4uA + return 0 + wait_backup backup-nfs-logical + local backup_name=backup-nfs-logical + local target_state=ready + set +o xtrace waiting for backup-nfs-logical to reach ready state..... + run_recovery_check backup-nfs-logical some-name -2nd '' + local backup=backup-nfs-logical + local cluster=some-name + local find_prefix_before=-2nd + local find_prefix_after= + write_data 100501 -2nd + local x=100501 + local find_prefix=-2nd + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4tpmjBxWME +++ mktemp ++ local LAST_ERR=/tmp/tmp.8MJulSJySJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4tpmjBxWME ++ cat /tmp/tmp.8MJulSJySJ ++ rm /tmp/tmp.4tpmjBxWME /tmp/tmp.8MJulSJySJ ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.VbucuY4S9b ++ mktemp + local LAST_ERR=/tmp/tmp.eL7elaw8rh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VbucuY4S9b Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : 
UUID("35d6f79e-844b-4c09-bd3b-e00abc1b7ba3") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.eL7elaw8rh + rm /tmp/tmp.VbucuY4S9b /tmp/tmp.eL7elaw8rh + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T09:53:42+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.iwPj81RNDw +++ mktemp ++ local LAST_ERR=/tmp/tmp.S89afBn0uq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iwPj81RNDw ++ cat /tmp/tmp.S89afBn0uq ++ rm /tmp/tmp.iwPj81RNDw /tmp/tmp.S89afBn0uq ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.msGTf1WDDE ++ mktemp + local LAST_ERR=/tmp/tmp.IC615AZozI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.msGTf1WDDE + cat /tmp/tmp.IC615AZozI + rm /tmp/tmp.msGTf1WDDE /tmp/tmp.IC615AZozI + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find-2nd.json /tmp/tmp.U3WlJTolQa/find-2nd + run_restore backup-nfs-logical + local backup_name=backup-nfs-logical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-logical/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-logical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ByHua4qfEb ++ mktemp + local 
LAST_ERR=/tmp/tmp.Ohua50UWPf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ByHua4qfEb perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical created + cat /tmp/tmp.Ohua50UWPf + rm /tmp/tmp.ByHua4qfEb /tmp/tmp.Ohua50UWPf + return 0 + wait_restore backup-nfs-logical some-name + local backup_name=backup-nfs-logical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical object to be createdOK Waiting psmdb-restore/restore-backup-nfs-logical to reach state "ready" OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1eyXcflWSD +++ mktemp ++ local LAST_ERR=/tmp/tmp.uSYMyz3Brt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1eyXcflWSD ++ cat /tmp/tmp.uSYMyz3Brt ++ rm /tmp/tmp.1eyXcflWSD /tmp/tmp.uSYMyz3Brt ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T09:54:13+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.8mINpfdvCA +++ mktemp ++ local LAST_ERR=/tmp/tmp.kuUaU2K0z9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8mINpfdvCA ++ cat /tmp/tmp.kuUaU2K0z9 ++ rm /tmp/tmp.8mINpfdvCA /tmp/tmp.kuUaU2K0z9 ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.11dIjmsAoG ++ mktemp + local LAST_ERR=/tmp/tmp.KVAJsj84AK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.11dIjmsAoG + cat /tmp/tmp.KVAJsj84AK + rm /tmp/tmp.11dIjmsAoG /tmp/tmp.KVAJsj84AK + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.U3WlJTolQa/find + desc 'CASE 2: Logical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: Logical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical-pitr + run_backup nfs backup-nfs-logical-pitr logical + local storage=nfs + local backup_name=backup-nfs-logical-pitr + local type=logical + desc 'run backup backup-nfs-logical-pitr' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-nfs-logical-pitr ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-nfs-logical-pitr" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.AeVXFPmk78 ++ mktemp + local LAST_ERR=/tmp/tmp.gMYDA3M2VX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AeVXFPmk78 perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr created + cat /tmp/tmp.gMYDA3M2VX + rm /tmp/tmp.AeVXFPmk78 /tmp/tmp.gMYDA3M2VX + return 0 + wait_backup backup-nfs-logical-pitr + local backup_name=backup-nfs-logical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-logical-pitr to reach ready state..... 
+ write_data 100502 -3rd + local x=100502 + local find_prefix=-3rd + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xgqIcnhx3P +++ mktemp ++ local LAST_ERR=/tmp/tmp.JJxLXJY6xJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xgqIcnhx3P ++ cat /tmp/tmp.JJxLXJY6xJ ++ rm /tmp/tmp.xgqIcnhx3P /tmp/tmp.JJxLXJY6xJ ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PixPsbDz60 ++ mktemp + local LAST_ERR=/tmp/tmp.GvBeSduA9U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PixPsbDz60 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e1776a11-5505-4896-b7b0-cf34e660867b") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.GvBeSduA9U + rm /tmp/tmp.PixPsbDz60 /tmp/tmp.GvBeSduA9U + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T09:54:32+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new 
thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.8ovexaWN4P +++ mktemp ++ local LAST_ERR=/tmp/tmp.w8i1vqShBp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8ovexaWN4P ++ cat /tmp/tmp.w8i1vqShBp ++ rm /tmp/tmp.8ovexaWN4P /tmp/tmp.w8i1vqShBp ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Cmihns8xJn ++ mktemp + local LAST_ERR=/tmp/tmp.GItefh1JKv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Cmihns8xJn + cat /tmp/tmp.GItefh1JKv + rm /tmp/tmp.Cmihns8xJn /tmp/tmp.GItefh1JKv + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.U3WlJTolQa/find-3rd + run_pitr_check backup-nfs-logical-pitr some-name -3rd + local backup=backup-nfs-logical-pitr + local cluster=some-name + local find_prefix=-3rd + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RqoVN4YZSH +++ mktemp ++ local LAST_ERR=/tmp/tmp.p2GdT8jQIw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RqoVN4YZSH ++ cat /tmp/tmp.p2GdT8jQIw ++ rm /tmp/tmp.RqoVN4YZSH /tmp/tmp.p2GdT8jQIw ++ return 0 + local backup_last_write=1757843664 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RH46vsPkJL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.j1a9BKVILs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RH46vsPkJL +++ cat /tmp/tmp.j1a9BKVILs +++ rm /tmp/tmp.RH46vsPkJL /tmp/tmp.j1a9BKVILs +++ return 0 ++ echo 1757843631 + local last_chunk=1757843631 + [[ 1757843631 -gt 1757843664 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' 
++++ mktemp +++ local LAST_OUT=/tmp/tmp.NzuauWrmk3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FWcj2reIVY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NzuauWrmk3 +++ cat /tmp/tmp.FWcj2reIVY +++ rm /tmp/tmp.NzuauWrmk3 /tmp/tmp.FWcj2reIVY +++ return 0 ++ echo 1757843631 + last_chunk=1757843631 + retries=1 ++ format_date 1757843631 ++ local timestamp=1757843631 +++ TZ=UTC +++ /usr/sbin/date -d@1757843631 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:53:51 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24)' + set +o xtrace [2025-09-14T09:54:40+0000] Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843631 -gt 1757843664 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8NEXuAxEnU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F2lPV7nmTw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8NEXuAxEnU +++ cat /tmp/tmp.F2lPV7nmTw +++ rm /tmp/tmp.8NEXuAxEnU /tmp/tmp.F2lPV7nmTw +++ return 0 ++ echo 1757843631 + last_chunk=1757843631 + retries=2 ++ format_date 1757843631 ++ local timestamp=1757843631 +++ TZ=UTC +++ /usr/sbin/date -d@1757843631 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:53:51 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24)' + set +o xtrace [2025-09-14T09:54:53+0000] Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843631 -gt 1757843664 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mv2p2Hw3Qn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.boMe5Mq77B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mv2p2Hw3Qn +++ cat /tmp/tmp.boMe5Mq77B +++ rm /tmp/tmp.mv2p2Hw3Qn /tmp/tmp.boMe5Mq77B +++ return 0 ++ echo 1757843631 + last_chunk=1757843631 + retries=3 ++ format_date 1757843631 ++ local timestamp=1757843631 +++ TZ=UTC +++ /usr/sbin/date -d@1757843631 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:53:51 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 
09:54:24)' + set +o xtrace [2025-09-14T09:55:05+0000] Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843631 -gt 1757843664 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HCRMlh73gg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iFoSb472Kp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.HCRMlh73gg +++ cat /tmp/tmp.iFoSb472Kp +++ rm /tmp/tmp.HCRMlh73gg /tmp/tmp.iFoSb472Kp +++ return 0 ++ echo 1757843631 + last_chunk=1757843631 + retries=4 ++ format_date 1757843631 ++ local timestamp=1757843631 +++ TZ=UTC +++ /usr/sbin/date -d@1757843631 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:53:51 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24)' + set +o xtrace [2025-09-14T09:55:17+0000] Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843631 -gt 1757843664 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.id6iDmlpqU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MOzLhjTUhm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.id6iDmlpqU +++ cat /tmp/tmp.MOzLhjTUhm +++ rm /tmp/tmp.id6iDmlpqU /tmp/tmp.MOzLhjTUhm +++ return 0 ++ echo 1757843631 + last_chunk=1757843631 + retries=5 ++ format_date 1757843631 ++ local timestamp=1757843631 +++ TZ=UTC +++ /usr/sbin/date -d@1757843631 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:53:51 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24)' + set +o xtrace [2025-09-14T09:55:30+0000] Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843631 -gt 1757843664 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s7lBbdDZbd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7wu4s8zf6H +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.s7lBbdDZbd +++ cat /tmp/tmp.7wu4s8zf6H +++ rm /tmp/tmp.s7lBbdDZbd /tmp/tmp.7wu4s8zf6H +++ return 0 ++ echo 1757843631 + 
last_chunk=1757843631 + retries=6 ++ format_date 1757843631 ++ local timestamp=1757843631 +++ TZ=UTC +++ /usr/sbin/date -d@1757843631 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:53:51 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24)' + set +o xtrace [2025-09-14T09:55:42+0000] Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843631 -gt 1757843664 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lyzbLsBKaw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.U6nxo8w2vx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lyzbLsBKaw +++ cat /tmp/tmp.U6nxo8w2vx +++ rm /tmp/tmp.lyzbLsBKaw /tmp/tmp.U6nxo8w2vx +++ return 0 ++ echo 1757843631 + last_chunk=1757843631 + retries=7 ++ format_date 1757843631 ++ local timestamp=1757843631 +++ TZ=UTC +++ /usr/sbin/date -d@1757843631 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:53:51 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24)' + set +o xtrace [2025-09-14T09:55:54+0000] Waiting for last oplog chunk (2025-09-14 09:53:51) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843631 -gt 1757843664 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jUx1oP3jzS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TlzGQvQWT0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jUx1oP3jzS +++ cat /tmp/tmp.TlzGQvQWT0 +++ rm /tmp/tmp.jUx1oP3jzS /tmp/tmp.TlzGQvQWT0 +++ return 0 ++ echo 1757843757 + last_chunk=1757843757 + retries=8 ++ format_date 1757843757 ++ local timestamp=1757843757 +++ TZ=UTC +++ /usr/sbin/date -d@1757843757 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:55:57 ++ format_date 1757843664 ++ local timestamp=1757843664 +++ TZ=UTC +++ /usr/sbin/date -d@1757843664 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:54:24 + log 'Waiting for last oplog chunk (2025-09-14 09:55:57) to be greater than last write (2025-09-14 09:54:24)' + set +o xtrace [2025-09-14T09:56:07+0000] Waiting for last oplog chunk (2025-09-14 09:55:57) to be greater than last write (2025-09-14 09:54:24) + sleep 10 + [[ 1757843757 -gt 1757843664 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.gb3GOjL2pu +++++ mktemp ++++ 
local LAST_ERR=/tmp/tmp.4nbD96WAm5 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.gb3GOjL2pu ++++ cat /tmp/tmp.4nbD96WAm5 ++++ rm /tmp/tmp.gb3GOjL2pu /tmp/tmp.4nbD96WAm5 ++++ return 0 +++ echo 1757843757 ++ format_date 1757843757 ++ local timestamp=1757843757 +++ TZ=UTC +++ /usr/sbin/date -d@1757843757 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:55:57 + local 'target_time=2025-09-14 09:55:57' + log 'dropping test collection' + set +o xtrace [2025-09-14T09:56:19+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Daik6maihS +++ mktemp ++ local LAST_ERR=/tmp/tmp.cRElhoILnv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Daik6maihS ++ cat /tmp/tmp.cRElhoILnv ++ rm /tmp/tmp.Daik6maihS /tmp/tmp.cRElhoILnv ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RGpcY48h2v ++ mktemp + local LAST_ERR=/tmp/tmp.GDvLDyieNq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RGpcY48h2v Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b7942833-75bb-4da9-abef-b4eea3ccf4cf") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.GDvLDyieNq + rm /tmp/tmp.RGpcY48h2v /tmp/tmp.GDvLDyieNq + return 0 + log 'checking pitr... backup: backup-nfs-logical-pitr target: 2025-09-14 09:55:57' + set +o xtrace [2025-09-14T09:56:21+0000] checking pitr... 
backup: backup-nfs-logical-pitr target: 2025-09-14 09:55:57 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-logical-pitr"' + yq eval '.spec.backupName = "backup-nfs-logical-pitr"' + yq eval '.spec.pitr.date = "2025-09-14 09:55:57"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KZKYawNMeP ++ mktemp + local LAST_ERR=/tmp/tmp.cRM1IK2Eic + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KZKYawNMeP perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical-pitr created + cat /tmp/tmp.cRM1IK2Eic + rm /tmp/tmp.KZKYawNMeP /tmp/tmp.cRM1IK2Eic + return 0 + wait_restore backup-nfs-logical-pitr some-name + local backup_name=backup-nfs-logical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical-pitr object to be createdOK Waiting psmdb-restore/restore-backup-nfs-logical-pitr to reach state "ready" OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gB2oHueaed +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZXHX81XL3K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gB2oHueaed ++ cat /tmp/tmp.ZXHX81XL3K ++ rm /tmp/tmp.gB2oHueaed /tmp/tmp.ZXHX81XL3K ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T09:56:49+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.H6UAHu6hmV +++ mktemp ++ local LAST_ERR=/tmp/tmp.9mHP55JSGK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H6UAHu6hmV ++ cat /tmp/tmp.9mHP55JSGK ++ rm /tmp/tmp.H6UAHu6hmV /tmp/tmp.9mHP55JSGK ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tIsfPcDUh4 ++ mktemp + local LAST_ERR=/tmp/tmp.CtMLAjD7Jm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tIsfPcDUh4 + cat /tmp/tmp.CtMLAjD7Jm + rm /tmp/tmp.tIsfPcDUh4 /tmp/tmp.CtMLAjD7Jm + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.U3WlJTolQa/find-3rd + desc 'CASE 3: Physical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 3: Physical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical + run_backup nfs backup-nfs-physical physical + local storage=nfs + local backup_name=backup-nfs-physical + local type=physical + desc 'run backup backup-nfs-physical' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-nfs-physical ----------------------------------------------------------------------------------- + yq eval '.metadata.name = "backup-nfs-physical" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.02SRWneqrH ++ mktemp + local LAST_ERR=/tmp/tmp.m5bsmNhxUM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.02SRWneqrH perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical created + cat /tmp/tmp.m5bsmNhxUM + rm /tmp/tmp.02SRWneqrH /tmp/tmp.m5bsmNhxUM + return 0 + wait_backup backup-nfs-physical + local backup_name=backup-nfs-physical + local target_state=ready + set +o xtrace waiting for backup-nfs-physical to reach ready state..... 
+ run_recovery_check backup-nfs-physical some-name -4th -3rd + local backup=backup-nfs-physical + local cluster=some-name + local find_prefix_before=-4th + local find_prefix_after=-3rd + write_data 100501 -4th + local x=100501 + local find_prefix=-4th + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5N5W9WyjDE +++ mktemp ++ local LAST_ERR=/tmp/tmp.35Eb9tk2hl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5N5W9WyjDE ++ cat /tmp/tmp.35Eb9tk2hl ++ rm /tmp/tmp.5N5W9WyjDE /tmp/tmp.35Eb9tk2hl ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.olkVZyjqoZ ++ mktemp + local LAST_ERR=/tmp/tmp.BYSPsOAgwb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.olkVZyjqoZ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("51c6a580-2726-475f-818e-5745b2a9407a") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.BYSPsOAgwb + rm /tmp/tmp.olkVZyjqoZ /tmp/tmp.BYSPsOAgwb + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 -4th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix=-4th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T09:57:06+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F 
NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.jTx7yiq4mi +++ mktemp ++ local LAST_ERR=/tmp/tmp.VlOcTTb15P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jTx7yiq4mi ++ cat /tmp/tmp.VlOcTTb15P ++ rm /tmp/tmp.jTx7yiq4mi /tmp/tmp.VlOcTTb15P ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.S2RwnndHz6 ++ mktemp + local LAST_ERR=/tmp/tmp.cPc9Tsvzib + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S2RwnndHz6 + cat /tmp/tmp.cPc9Tsvzib + rm /tmp/tmp.S2RwnndHz6 /tmp/tmp.cPc9Tsvzib + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find-4th.json /tmp/tmp.U3WlJTolQa/find-4th + run_restore backup-nfs-physical + local backup_name=backup-nfs-physical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-physical/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.iJAQLXNlQz ++ mktemp + local LAST_ERR=/tmp/tmp.8aJc7v1nPh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iJAQLXNlQz perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical created + cat /tmp/tmp.8aJc7v1nPh + rm /tmp/tmp.iJAQLXNlQz /tmp/tmp.8aJc7v1nPh + return 0 + wait_restore backup-nfs-physical some-name + local backup_name=backup-nfs-physical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical object to be createdOK Waiting psmdb-restore/restore-backup-nfs-physical to reach state "ready" ...OK after 3 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kvYJ8olCUE +++ mktemp ++ local LAST_ERR=/tmp/tmp.SKBYbElv4J ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kvYJ8olCUE ++ cat /tmp/tmp.SKBYbElv4J ++ rm /tmp/tmp.kvYJ8olCUE /tmp/tmp.SKBYbElv4J ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rQwzzZkVtH +++ mktemp ++ local LAST_ERR=/tmp/tmp.TaoOwzxVWO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rQwzzZkVtH ++ cat /tmp/tmp.TaoOwzxVWO ++ rm /tmp/tmp.rQwzzZkVtH /tmp/tmp.TaoOwzxVWO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3JuvYhr36k +++ mktemp ++ local LAST_ERR=/tmp/tmp.gsUTG7e8cv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3JuvYhr36k ++ cat /tmp/tmp.gsUTG7e8cv ++ rm /tmp/tmp.3JuvYhr36k /tmp/tmp.gsUTG7e8cv ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AxD8avZdbx +++ mktemp ++ local LAST_ERR=/tmp/tmp.3YbqMuWiLq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AxD8avZdbx ++ cat /tmp/tmp.3YbqMuWiLq ++ rm /tmp/tmp.AxD8avZdbx /tmp/tmp.3YbqMuWiLq ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uxf6ToTTlw +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZlJAwxjdzB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Uxf6ToTTlw ++ cat /tmp/tmp.ZlJAwxjdzB ++ rm /tmp/tmp.Uxf6ToTTlw /tmp/tmp.ZlJAwxjdzB ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OyXYws4tU6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hvU96rczAT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OyXYws4tU6 ++ cat /tmp/tmp.hvU96rczAT ++ rm /tmp/tmp.OyXYws4tU6 /tmp/tmp.hvU96rczAT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LJL3O0EuEQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.0C5End7s1L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LJL3O0EuEQ ++ cat /tmp/tmp.0C5End7s1L ++ rm /tmp/tmp.LJL3O0EuEQ /tmp/tmp.0C5End7s1L ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HNY481KC8k +++ mktemp ++ local LAST_ERR=/tmp/tmp.cGMkB5f9DV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HNY481KC8k ++ cat /tmp/tmp.cGMkB5f9DV ++ rm /tmp/tmp.HNY481KC8k /tmp/tmp.cGMkB5f9DV ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sMOoiR7cwP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Igq585JrPN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sMOoiR7cwP ++ cat /tmp/tmp.Igq585JrPN ++ rm /tmp/tmp.sMOoiR7cwP /tmp/tmp.Igq585JrPN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FiNlUpRhoa +++ mktemp ++ local LAST_ERR=/tmp/tmp.DluIanyAlZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FiNlUpRhoa ++ cat /tmp/tmp.DluIanyAlZ ++ rm /tmp/tmp.FiNlUpRhoa /tmp/tmp.DluIanyAlZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LmdYuwlJCX +++ mktemp ++ local LAST_ERR=/tmp/tmp.YoMw8WxXDW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LmdYuwlJCX ++ cat /tmp/tmp.YoMw8WxXDW ++ rm /tmp/tmp.LmdYuwlJCX /tmp/tmp.YoMw8WxXDW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tNL9A19krB +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZYyHtdLnQz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tNL9A19krB ++ cat /tmp/tmp.ZYyHtdLnQz ++ rm /tmp/tmp.tNL9A19krB /tmp/tmp.ZYyHtdLnQz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oax7eIQf80 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xyC9Ko80EZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oax7eIQf80 ++ cat /tmp/tmp.xyC9Ko80EZ ++ rm /tmp/tmp.oax7eIQf80 /tmp/tmp.xyC9Ko80EZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nSc4x3uVha +++ mktemp ++ local LAST_ERR=/tmp/tmp.LAIMSDedmL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nSc4x3uVha ++ cat /tmp/tmp.LAIMSDedmL ++ rm /tmp/tmp.nSc4x3uVha /tmp/tmp.LAIMSDedmL ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T10:03:50+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ local LAST_OUT=/tmp/tmp.CrxNAct4td +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_ERR=/tmp/tmp.IaEUjLLlcB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CrxNAct4td ++ cat /tmp/tmp.IaEUjLLlcB ++ rm /tmp/tmp.CrxNAct4td /tmp/tmp.IaEUjLLlcB ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gmrGi25rlz ++ mktemp + local LAST_ERR=/tmp/tmp.IwiQx0w6uq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gmrGi25rlz + cat /tmp/tmp.IwiQx0w6uq + rm /tmp/tmp.gmrGi25rlz /tmp/tmp.IwiQx0w6uq + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.U3WlJTolQa/find-3rd + desc 'CASE 4: Physical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 4: Physical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical-pitr + run_backup nfs backup-nfs-physical-pitr physical + local storage=nfs + local backup_name=backup-nfs-physical-pitr + local type=physical + desc 'run backup backup-nfs-physical-pitr' + set +o xtrace ----------------------------------------------------------------------------------- run backup backup-nfs-physical-pitr ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-nfs-physical-pitr" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/backup-nfs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.RqTcgox9iB ++ mktemp + local LAST_ERR=/tmp/tmp.YnVaRQMST1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RqTcgox9iB perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr created + cat /tmp/tmp.YnVaRQMST1 + rm /tmp/tmp.RqTcgox9iB /tmp/tmp.YnVaRQMST1 + return 0 + wait_backup backup-nfs-physical-pitr + local backup_name=backup-nfs-physical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-physical-pitr to reach ready state..... 
+ write_data 100503 -5th + local x=100503 + local find_prefix=-5th + run_mongo 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XQPm5ExWWk +++ mktemp ++ local LAST_ERR=/tmp/tmp.aXteeaBbV1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XQPm5ExWWk ++ cat /tmp/tmp.aXteeaBbV1 ++ rm /tmp/tmp.XQPm5ExWWk /tmp/tmp.aXteeaBbV1 ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.wByHtWXPLE ++ mktemp + local LAST_ERR=/tmp/tmp.jXyChBF4Wg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wByHtWXPLE Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6518d9a7-990c-4278-8ae6-56c1eec37e18") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.jXyChBF4Wg + rm /tmp/tmp.wByHtWXPLE /tmp/tmp.jXyChBF4Wg + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T10:04:13+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new 
thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.7xLepH0EaA +++ mktemp ++ local LAST_ERR=/tmp/tmp.UKWiia3lYM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7xLepH0EaA ++ cat /tmp/tmp.UKWiia3lYM ++ rm /tmp/tmp.7xLepH0EaA /tmp/tmp.UKWiia3lYM ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.XkzdO8hIag ++ mktemp + local LAST_ERR=/tmp/tmp.JGdeSKs1fh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XkzdO8hIag + cat /tmp/tmp.JGdeSKs1fh + rm /tmp/tmp.XkzdO8hIag /tmp/tmp.JGdeSKs1fh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.U3WlJTolQa/find-5th + run_pitr_check backup-nfs-physical-pitr some-name -5th + local backup=backup-nfs-physical-pitr + local cluster=some-name + local find_prefix=-5th + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Egp30N4P1f +++ mktemp ++ local LAST_ERR=/tmp/tmp.r6JzLCDfmS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Egp30N4P1f ++ cat /tmp/tmp.r6JzLCDfmS ++ rm /tmp/tmp.Egp30N4P1f /tmp/tmp.r6JzLCDfmS ++ return 0 + local backup_last_write=1757844240 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VG2D3jCK1R ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Oqm304NmlQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.VG2D3jCK1R +++ cat /tmp/tmp.Oqm304NmlQ +++ rm /tmp/tmp.VG2D3jCK1R /tmp/tmp.Oqm304NmlQ +++ return 0 ++ echo 1757843835 + local last_chunk=1757843835 + [[ 1757843835 -gt 1757844240 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' 
++++ mktemp +++ local LAST_OUT=/tmp/tmp.kZ1bNXaAwl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zW5ORVGvmR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kZ1bNXaAwl +++ cat /tmp/tmp.zW5ORVGvmR +++ rm /tmp/tmp.kZ1bNXaAwl /tmp/tmp.zW5ORVGvmR +++ return 0 ++ echo 1757843835 + last_chunk=1757843835 + retries=1 ++ format_date 1757843835 ++ local timestamp=1757843835 +++ TZ=UTC +++ /usr/sbin/date -d@1757843835 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:57:15 ++ format_date 1757844240 ++ local timestamp=1757844240 +++ TZ=UTC +++ /usr/sbin/date -d@1757844240 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:04:00 + log 'Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00)' + set +o xtrace [2025-09-14T10:04:23+0000] Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00) + sleep 10 + [[ 1757843835 -gt 1757844240 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.w0yrfZQlGL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.o48wL1d1e7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.w0yrfZQlGL +++ cat /tmp/tmp.o48wL1d1e7 +++ rm /tmp/tmp.w0yrfZQlGL /tmp/tmp.o48wL1d1e7 +++ return 0 ++ echo 1757843835 + last_chunk=1757843835 + retries=2 ++ format_date 1757843835 ++ local timestamp=1757843835 +++ TZ=UTC +++ /usr/sbin/date -d@1757843835 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:57:15 ++ format_date 1757844240 ++ local timestamp=1757844240 +++ TZ=UTC +++ /usr/sbin/date -d@1757844240 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:04:00 + log 'Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00)' + set +o xtrace [2025-09-14T10:04:36+0000] Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00) + sleep 10 + [[ 1757843835 -gt 1757844240 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Sa2XfTsJEL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NFCKdrNpj1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Sa2XfTsJEL +++ cat /tmp/tmp.NFCKdrNpj1 +++ rm /tmp/tmp.Sa2XfTsJEL /tmp/tmp.NFCKdrNpj1 +++ return 0 ++ echo 1757843835 + last_chunk=1757843835 + retries=3 ++ format_date 1757843835 ++ local timestamp=1757843835 +++ TZ=UTC +++ /usr/sbin/date -d@1757843835 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:57:15 ++ format_date 1757844240 ++ local timestamp=1757844240 +++ TZ=UTC +++ /usr/sbin/date -d@1757844240 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:04:00 + log 'Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 
10:04:00)' + set +o xtrace [2025-09-14T10:04:48+0000] Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00) + sleep 10 + [[ 1757843835 -gt 1757844240 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cJ1VCZ8TIw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fMuyXBUuiQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cJ1VCZ8TIw +++ cat /tmp/tmp.fMuyXBUuiQ +++ rm /tmp/tmp.cJ1VCZ8TIw /tmp/tmp.fMuyXBUuiQ +++ return 0 ++ echo 1757843835 + last_chunk=1757843835 + retries=4 ++ format_date 1757843835 ++ local timestamp=1757843835 +++ TZ=UTC +++ /usr/sbin/date -d@1757843835 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:57:15 ++ format_date 1757844240 ++ local timestamp=1757844240 +++ TZ=UTC +++ /usr/sbin/date -d@1757844240 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:04:00 + log 'Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00)' + set +o xtrace [2025-09-14T10:05:00+0000] Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00) + sleep 10 + [[ 1757843835 -gt 1757844240 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lZyEBVSlQt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.swO3KebEJb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lZyEBVSlQt +++ cat /tmp/tmp.swO3KebEJb +++ rm /tmp/tmp.lZyEBVSlQt /tmp/tmp.swO3KebEJb +++ return 0 ++ echo 1757843835 + last_chunk=1757843835 + retries=5 ++ format_date 1757843835 ++ local timestamp=1757843835 +++ TZ=UTC +++ /usr/sbin/date -d@1757843835 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 09:57:15 ++ format_date 1757844240 ++ local timestamp=1757844240 +++ TZ=UTC +++ /usr/sbin/date -d@1757844240 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:04:00 + log 'Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00)' + set +o xtrace [2025-09-14T10:05:12+0000] Waiting for last oplog chunk (2025-09-14 09:57:15) to be greater than last write (2025-09-14 10:04:00) + sleep 10 + [[ 1757843835 -gt 1757844240 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZaFeo1vZxR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2pZ22F4Qjk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZaFeo1vZxR +++ cat /tmp/tmp.2pZ22F4Qjk +++ rm /tmp/tmp.ZaFeo1vZxR /tmp/tmp.2pZ22F4Qjk +++ return 0 ++ echo 1757844312 + 
last_chunk=1757844312 + retries=6 ++ format_date 1757844312 ++ local timestamp=1757844312 +++ TZ=UTC +++ /usr/sbin/date -d@1757844312 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:05:12 ++ format_date 1757844240 ++ local timestamp=1757844240 +++ TZ=UTC +++ /usr/sbin/date -d@1757844240 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:04:00 + log 'Waiting for last oplog chunk (2025-09-14 10:05:12) to be greater than last write (2025-09-14 10:04:00)' + set +o xtrace [2025-09-14T10:05:24+0000] Waiting for last oplog chunk (2025-09-14 10:05:12) to be greater than last write (2025-09-14 10:04:00) + sleep 10 + [[ 1757844312 -gt 1757844240 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3tQ7LAgROG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2s7rTJWZzP ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.3tQ7LAgROG ++++ cat /tmp/tmp.2s7rTJWZzP ++++ rm /tmp/tmp.3tQ7LAgROG /tmp/tmp.2s7rTJWZzP ++++ return 0 +++ echo 1757844312 ++ format_date 1757844312 ++ local timestamp=1757844312 +++ TZ=UTC +++ /usr/sbin/date -d@1757844312 '+%Y-%m-%d %H:%M:%S' ++ echo 2025-09-14 10:05:12 + local 'target_time=2025-09-14 10:05:12' + log 'dropping test collection' + set +o xtrace [2025-09-14T10:05:37+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IZo0FwKgRp +++ mktemp ++ local LAST_ERR=/tmp/tmp.FJazSz8fRn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IZo0FwKgRp ++ cat /tmp/tmp.FJazSz8fRn ++ rm /tmp/tmp.IZo0FwKgRp /tmp/tmp.FJazSz8fRn ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.V9u9oBKHNw ++ mktemp + local LAST_ERR=/tmp/tmp.PRpWJKRyvr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.V9u9oBKHNw Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-8637.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7858fcea-5ef4-4ca3-90cc-f815e9cef775") } Percona Server for MongoDB server version: v8.0.12-4 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.PRpWJKRyvr + rm /tmp/tmp.V9u9oBKHNw /tmp/tmp.PRpWJKRyvr + return 0 + log 'checking pitr... backup: backup-nfs-physical-pitr target: 2025-09-14 10:05:12' + set +o xtrace [2025-09-14T10:05:39+0000] checking pitr... backup: backup-nfs-physical-pitr target: 2025-09-14 10:05:12 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-physical-pitr"' + yq eval '.spec.backupName = "backup-nfs-physical-pitr"' + yq eval '.spec.pitr.date = "2025-09-14 10:05:12"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.NFUwbL7I1y ++ mktemp + local LAST_ERR=/tmp/tmp.1z4RxPReGB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NFUwbL7I1y perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical-pitr created + cat /tmp/tmp.1z4RxPReGB + rm /tmp/tmp.NFUwbL7I1y /tmp/tmp.1z4RxPReGB + return 0 + wait_restore backup-nfs-physical-pitr some-name + local backup_name=backup-nfs-physical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical-pitr object to be createdOK Waiting psmdb-restore/restore-backup-nfs-physical-pitr to reach state "ready" .....OK after 5 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UzMjgykrIa +++ mktemp ++ local LAST_ERR=/tmp/tmp.W4PviO5d3K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UzMjgykrIa ++ cat /tmp/tmp.W4PviO5d3K ++ rm /tmp/tmp.UzMjgykrIa /tmp/tmp.W4PviO5d3K ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EeqnIHUA33 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TbRKmTTsIW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EeqnIHUA33 ++ cat /tmp/tmp.TbRKmTTsIW ++ rm /tmp/tmp.EeqnIHUA33 /tmp/tmp.TbRKmTTsIW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XhODX3vtCA +++ mktemp ++ local LAST_ERR=/tmp/tmp.uOHRfiK0Qu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XhODX3vtCA ++ cat /tmp/tmp.uOHRfiK0Qu ++ rm /tmp/tmp.XhODX3vtCA /tmp/tmp.uOHRfiK0Qu ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dtCFvNKVa3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GaI3F7xINB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dtCFvNKVa3 ++ cat /tmp/tmp.GaI3F7xINB ++ rm /tmp/tmp.dtCFvNKVa3 /tmp/tmp.GaI3F7xINB ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.39riIcwYCS +++ mktemp ++ local LAST_ERR=/tmp/tmp.zvZcEnphbV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.39riIcwYCS ++ cat /tmp/tmp.zvZcEnphbV ++ rm /tmp/tmp.39riIcwYCS /tmp/tmp.zvZcEnphbV ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m5A1Hhf705 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1TEtsMshbH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m5A1Hhf705 ++ cat /tmp/tmp.1TEtsMshbH ++ rm /tmp/tmp.m5A1Hhf705 /tmp/tmp.1TEtsMshbH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A5Dw6JARHZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.lrLoazQ8Sz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A5Dw6JARHZ ++ cat /tmp/tmp.lrLoazQ8Sz ++ rm /tmp/tmp.A5Dw6JARHZ /tmp/tmp.lrLoazQ8Sz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TNp78oC5El +++ mktemp ++ local LAST_ERR=/tmp/tmp.4eSAz7GiBp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TNp78oC5El ++ cat /tmp/tmp.4eSAz7GiBp ++ rm /tmp/tmp.TNp78oC5El /tmp/tmp.4eSAz7GiBp ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZPSXLH3ydy +++ mktemp ++ local LAST_ERR=/tmp/tmp.1uBqNZGMDA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZPSXLH3ydy ++ cat /tmp/tmp.1uBqNZGMDA ++ rm /tmp/tmp.ZPSXLH3ydy /tmp/tmp.1uBqNZGMDA ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rhA7BctlOq +++ mktemp ++ local LAST_ERR=/tmp/tmp.ncyDD5GjDL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rhA7BctlOq ++ cat /tmp/tmp.ncyDD5GjDL ++ rm /tmp/tmp.rhA7BctlOq /tmp/tmp.ncyDD5GjDL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tdq30NmeNS +++ mktemp ++ local LAST_ERR=/tmp/tmp.lxHNRHQdYo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tdq30NmeNS ++ cat /tmp/tmp.lxHNRHQdYo ++ rm /tmp/tmp.Tdq30NmeNS /tmp/tmp.lxHNRHQdYo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4a9dMZGBhN +++ mktemp ++ local LAST_ERR=/tmp/tmp.HL6eKFHwbT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4a9dMZGBhN ++ cat /tmp/tmp.HL6eKFHwbT ++ rm /tmp/tmp.4a9dMZGBhN /tmp/tmp.HL6eKFHwbT ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i93pIiIiSi +++ mktemp ++ local LAST_ERR=/tmp/tmp.waSFnypAqE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i93pIiIiSi ++ cat /tmp/tmp.waSFnypAqE ++ rm /tmp/tmp.i93pIiIiSi /tmp/tmp.waSFnypAqE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LSKkQ8AEKm +++ mktemp ++ local LAST_ERR=/tmp/tmp.X9YcnYlq1g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LSKkQ8AEKm ++ cat /tmp/tmp.X9YcnYlq1g ++ rm /tmp/tmp.LSKkQ8AEKm /tmp/tmp.X9YcnYlq1g ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-8637 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-09-14T10:13:41+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-8637 mongodb .svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-8637 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.jcmVMfvW61 +++ mktemp ++ local LAST_ERR=/tmp/tmp.F26cEzUElP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jcmVMfvW61 ++ cat /tmp/tmp.F26cEzUElP ++ rm /tmp/tmp.jcmVMfvW61 /tmp/tmp.F26cEzUElP ++ return 0 + local client_container=psmdb-client-66f577db5f-jqq7z + local mongo_flag= + [[ myApp:myPass@some-name-rs0.demand-backup-fs-8637 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yFsltHVzGP ++ mktemp + local LAST_ERR=/tmp/tmp.64UdsRySM4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-jqq7z -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-8637.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yFsltHVzGP + cat /tmp/tmp.64UdsRySM4 + rm /tmp/tmp.yFsltHVzGP /tmp/tmp.64UdsRySM4 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.U3WlJTolQa/find-5th + destroy demand-backup-fs-8637 + local namespace=demand-backup-fs-8637 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + 
set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.12H3rdjxyY +++ mktemp ++ local LAST_ERR=/tmp/tmp.NDY04bR1AI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.12H3rdjxyY ++ cat /tmp/tmp.NDY04bR1AI ++ rm /tmp/tmp.12H3rdjxyY /tmp/tmp.NDY04bR1AI ++ return 0 + '[' 4 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.aQhoCnjWlI ++ mktemp + local LAST_ERR=/tmp/tmp.qYNyTV6IqI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aQhoCnjWlI NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-nfs-logical some-name nfs /mnt/nfs/2025-09-14T09:53:29Z logical 39.59KB ready 20m 20m backup-nfs-logical-pitr some-name nfs /mnt/nfs/2025-09-14T09:54:18Z logical 49.57KB ready 19m 19m backup-nfs-physical some-name nfs /mnt/nfs/2025-09-14T09:56:52Z physical 1.37MB ready 16m 16m backup-nfs-physical-pitr some-name nfs /mnt/nfs/2025-09-14T10:03:57Z physical 862.90KB ready 9m42s 9m50s + cat /tmp/tmp.qYNyTV6IqI + rm /tmp/tmp.aQhoCnjWlI /tmp/tmp.qYNyTV6IqI + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.DyrlqAFb7c ++ mktemp + local LAST_ERR=/tmp/tmp.vXtCUfLPLw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DyrlqAFb7c perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical" deleted from demand-backup-fs-8637 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical-pitr" deleted from demand-backup-fs-8637 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical" deleted from demand-backup-fs-8637 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical-pitr" deleted from demand-backup-fs-8637 namespace + cat /tmp/tmp.vXtCUfLPLw + rm /tmp/tmp.DyrlqAFb7c /tmp/tmp.vXtCUfLPLw + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.z4y2GFh1yY ++ mktemp + local LAST_ERR=/tmp/tmp.Ug5nTbESMY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z4y2GFh1yY customresourcedefinition.apiextensions.k8s.io 
"perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.Ug5nTbESMY + rm /tmp/tmp.z4y2GFh1yY /tmp/tmp.Ug5nTbESMY + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.oi2SCl8yw3 ++ mktemp + local LAST_ERR=/tmp/tmp.7aCigOeTG0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oi2SCl8yw3 + cat /tmp/tmp.7aCigOeTG0 + rm /tmp/tmp.oi2SCl8yw3 /tmp/tmp.7aCigOeTG0 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ITzx3bUue9 ++ mktemp + local LAST_ERR=/tmp/tmp.RiiHTDVcwo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ITzx3bUue9 + cat /tmp/tmp.RiiHTDVcwo + rm /tmp/tmp.ITzx3bUue9 /tmp/tmp.RiiHTDVcwo + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-8637 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.6o0JTpippB ++ mktemp + local LAST_ERR=/tmp/tmp.lNGktHMhr8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + 
kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6o0JTpippB customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.lNGktHMhr8 + rm /tmp/tmp.6o0JTpippB /tmp/tmp.lNGktHMhr8 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Bp36oqOkOi ++ mktemp + local LAST_ERR=/tmp/tmp.RVvtfnZCA3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bp36oqOkOi clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.RVvtfnZCA3 + rm /tmp/tmp.Bp36oqOkOi /tmp/tmp.RVvtfnZCA3 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gQQq0d3Xdi ++ mktemp + local LAST_ERR=/tmp/tmp.sPKZkPRZCH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.gQQq0d3Xdi + cat /tmp/tmp.sPKZkPRZCH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.gQQq0d3Xdi + cat /tmp/tmp.sPKZkPRZCH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.gQQq0d3Xdi + cat /tmp/tmp.sPKZkPRZCH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): 
error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.gQQq0d3Xdi + cat /tmp/tmp.sPKZkPRZCH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): 
error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.gQQq0d3Xdi /tmp/tmp.sPKZkPRZCH + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-fs-8637 + rm -rf /tmp/tmp.U3WlJTolQa + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.Y0xY8futNW + desc 'test passed' + set +o xtrace 
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.PIKKoXfC9O
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.I05CWEoJdD
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.0uzB7C160l
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace demand-backup-fs-8637
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
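
Editor's note: the kubectl_bin lines above (mktemp, LAST_OUT/LAST_ERR, "seq 0 2", set +e, cat, rm) all come from the test suite's retry wrapper around kubectl. The following is only a minimal sketch of what that wrapper appears to do, reconstructed from the xtrace output in this log; the actual helper in the repository's e2e-tests scripts may differ, and the sleep interval and redirection details here are assumptions.

# Sketch only: reconstructed from the trace, not the real e2e-tests helper.
kubectl_bin() {
	local LAST_OUT LAST_ERR exit_status i
	local timeout=4
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		# Capture stdout and stderr to temp files, as the "+ cat /tmp/tmp.*" lines suggest.
		kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		if [ "$exit_status" -eq 0 ]; then
			break
		fi
		# Wait before retrying; the trace shows "+ sleep 8" with timeout=4,
		# so timeout*2 is assumed here.
		sleep $((timeout * 2))
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return "$exit_status"
}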