Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/logs/demand-backup-fs.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ cluster=some-name
+ create_infra demand-backup-fs-21007
+ local ns=demand-backup-fs-21007
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.ATW9otaDEa
++ mktemp
+ local LAST_ERR=/tmp/tmp.IVXwx20fvt
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ATW9otaDEa
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.IVXwx20fvt
+ rm /tmp/tmp.ATW9otaDEa /tmp/tmp.IVXwx20fvt
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-4141 backup-nfs-logical --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-4141 backup-nfs-logical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-4141 backup-nfs-physical --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical patched
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-4141 backup-nfs-physical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr patched
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.gKp87cMOCy
++ mktemp
+ local LAST_ERR=/tmp/tmp.WnuDLaGHd2
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
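# --- editor's note: every kubectl_bin trace in this log (mktemp, retry loop,
# cat, rm, return) comes from a retry wrapper in the e2e helper library. A
# minimal sketch reconstructed from this trace; the real helper may differ:
#
#   kubectl_bin() {
#       local LAST_OUT LAST_ERR exit_status=0 timeout=4
#       LAST_OUT=$(mktemp)
#       LAST_ERR=$(mktemp)
#       for i in $(seq 0 2); do
#           set +e
#           kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
#           exit_status=$?
#           set -e
#           [ "$exit_status" -eq 0 ] && break
#           sleep $((timeout * i))    # backoff observed in this log: 0s, 4s, 8s
#       done
#       cat "$LAST_OUT"
#       cat "$LAST_ERR" >&2
#       rm "$LAST_OUT" "$LAST_ERR"
#       return $exit_status
#   }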
+ break
+ cat /tmp/tmp.gKp87cMOCy
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met
+ cat /tmp/tmp.WnuDLaGHd2
+ rm /tmp/tmp.gKp87cMOCy /tmp/tmp.WnuDLaGHd2
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.l5g7hD0Zfa
++ mktemp
+ local LAST_ERR=/tmp/tmp.b2XwPbAWz5
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.l5g7hD0Zfa
+ cat /tmp/tmp.b2XwPbAWz5
+ rm /tmp/tmp.l5g7hD0Zfa /tmp/tmp.b2XwPbAWz5
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-4141 some-name --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodb.psmdb.percona.com/some-name patched
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.j0UZuNwuMi
++ mktemp
+ local LAST_ERR=/tmp/tmp.IWYmG48dly
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.j0UZuNwuMi
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met
+ cat /tmp/tmp.IWYmG48dly
+ rm /tmp/tmp.j0UZuNwuMi /tmp/tmp.IWYmG48dly
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.xkQYmqepSW
++ mktemp
+ local LAST_ERR=/tmp/tmp.yn4hN7kRop
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.xkQYmqepSW
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.yn4hN7kRop
+ rm /tmp/tmp.xkQYmqepSW /tmp/tmp.yn4hN7kRop
+ return 0
+ check_crd_for_deletion PR-2226-1e37b178
+ local git_tag=PR-2226-1e37b178
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2226-1e37b178/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/sbin/sed s/---//g
++
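# --- editor's note: the three loops above clear metadata.finalizers on any
# leftover custom resources before waiting for CRD deletion, so the delete
# cannot hang on finalizer processing. The pattern, shown standalone (the
# resource type is one of the three from deploy/crd.yaml):
#
#   kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
#       | grep -v NAMESPACE \
#       | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com \
#           -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
#
# Note the failure mode visible above for perconaservermongodbrestores: when
# the resource type is already gone, "kubectl get" prints an error instead of
# rows, xargs still fires once on that text (hence the bogus "-n sh" patch),
# and the harness swallows the failed patch with the no-op "+ :".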
/usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F52bblFviX +++ mktemp ++ local LAST_ERR=/tmp/tmp.SlAqzY1Tgs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.F52bblFviX ++ cat /tmp/tmp.SlAqzY1Tgs Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.F52bblFviX ++ cat /tmp/tmp.SlAqzY1Tgs Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.F52bblFviX ++ cat /tmp/tmp.SlAqzY1Tgs Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.F52bblFviX ++ cat /tmp/tmp.SlAqzY1Tgs Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.F52bblFviX /tmp/tmp.SlAqzY1Tgs ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up 
all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.0IpKV7FtiM + local LAST_OUT=/tmp/tmp.3woVeZRPHd ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.jzRlmgMt18 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.ueJGjqT8wv + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0IpKV7FtiM + cat /tmp/tmp.jzRlmgMt18 + rm /tmp/tmp.0IpKV7FtiM /tmp/tmp.jzRlmgMt18 + return 0 namespace "demand-backup-fs-4141" deleted namespace "storage" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3woVeZRPHd namespace "psmdb-operator" deleted + cat /tmp/tmp.ueJGjqT8wv + rm /tmp/tmp.3woVeZRPHd /tmp/tmp.ueJGjqT8wv + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.xpJa1em1oh ++ mktemp + local LAST_ERR=/tmp/tmp.M9BqohMRBY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xpJa1em1oh + cat /tmp/tmp.M9BqohMRBY + rm /tmp/tmp.xpJa1em1oh /tmp/tmp.M9BqohMRBY + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FkRDwOk4B9 ++ mktemp + local LAST_ERR=/tmp/tmp.YMXN3nVQy9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FkRDwOk4B9 namespace/psmdb-operator created + cat /tmp/tmp.YMXN3nVQy9 + rm /tmp/tmp.FkRDwOk4B9 /tmp/tmp.YMXN3nVQy9 + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.QwIjyrH90e +++ mktemp ++ local LAST_ERR=/tmp/tmp.SHF1QbZVaF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QwIjyrH90e ++ cat /tmp/tmp.SHF1QbZVaF ++ rm /tmp/tmp.QwIjyrH90e /tmp/tmp.SHF1QbZVaF ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2226-1e37b178-2-cluster12 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2wTfFchlhV ++ mktemp + local LAST_ERR=/tmp/tmp.VKlvtodNdF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-2226-1e37b178-2-cluster12 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2wTfFchlhV Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2226-1e37b178-2-cluster12" modified. + cat /tmp/tmp.VKlvtodNdF + rm /tmp/tmp.2wTfFchlhV /tmp/tmp.VKlvtodNdF + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2226-1e37b178' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2226-1e37b178 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.iTeb6h1Wff ++ mktemp + local LAST_ERR=/tmp/tmp.URzcgI80hS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iTeb6h1Wff customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.URzcgI80hS + rm /tmp/tmp.iTeb6h1Wff /tmp/tmp.URzcgI80hS + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.dNgetrRvbA ++ mktemp + local LAST_ERR=/tmp/tmp.pMCbv4czyU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dNgetrRvbA clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.pMCbv4czyU + rm /tmp/tmp.dNgetrRvbA /tmp/tmp.pMCbv4czyU + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2226-1e37b178") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.aja7DVkXvm ++ mktemp + local LAST_ERR=/tmp/tmp.B9MTzNrIAd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aja7DVkXvm deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.B9MTzNrIAd + rm /tmp/tmp.aja7DVkXvm /tmp/tmp.B9MTzNrIAd + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.pGGrZJKhoY +++ mktemp ++ local LAST_ERR=/tmp/tmp.eecrleYVeV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pGGrZJKhoY ++ cat /tmp/tmp.eecrleYVeV ++ rm /tmp/tmp.pGGrZJKhoY /tmp/tmp.eecrleYVeV ++ return 0 + wait_operator_pod percona-server-mongodb-operator-bff7975f5-rcghc + local pod=percona-server-mongodb-operator-bff7975f5-rcghc + set +o xtrace waiting for pod/percona-server-mongodb-operator-bff7975f5-rcghc to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.M4MdKIAL5F +++ mktemp ++ local LAST_ERR=/tmp/tmp.91MXIebDgL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M4MdKIAL5F ++ cat /tmp/tmp.91MXIebDgL ++ rm /tmp/tmp.M4MdKIAL5F /tmp/tmp.91MXIebDgL ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-bff7975f5-rcghc ++ mktemp + local LAST_OUT=/tmp/tmp.9hc4mtth8j ++ mktemp + local LAST_ERR=/tmp/tmp.MUKYGBjdJA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-bff7975f5-rcghc + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9hc4mtth8j + cat /tmp/tmp.MUKYGBjdJA + rm /tmp/tmp.9hc4mtth8j /tmp/tmp.MUKYGBjdJA + return 0 2026-02-03T14:36:04.483Z INFO setup Manager starting up {"gitCommit": "1e37b17888853ae8e125b9de8658b67d796c4f87", "gitBranch": "PR-2226-1e37b178", "buildTime": "", "goVersion": "go1.25.6", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-fs-21007 + local namespace=demand-backup-fs-21007 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + xargs kubectl delete ns + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-fs-21007' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-fs-21007 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-fs-21007 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.U5sloUrz6k ++ mktemp + local LAST_OUT=/tmp/tmp.Du4E9RA8TU + local LAST_ERR=/tmp/tmp.z4DTtCdz2a + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.LTcCzjTlQX + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-fs-21007 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U5sloUrz6k + cat /tmp/tmp.z4DTtCdz2a + rm /tmp/tmp.U5sloUrz6k /tmp/tmp.z4DTtCdz2a + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Du4E9RA8TU + cat /tmp/tmp.LTcCzjTlQX + rm /tmp/tmp.Du4E9RA8TU /tmp/tmp.LTcCzjTlQX + return 0 + kubectl_bin wait --for=delete namespace demand-backup-fs-21007 ++ mktemp + local LAST_OUT=/tmp/tmp.BEcrsnExoi ++ mktemp + local LAST_ERR=/tmp/tmp.X2OCifMZXp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-fs-21007 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BEcrsnExoi + cat /tmp/tmp.X2OCifMZXp + rm /tmp/tmp.BEcrsnExoi /tmp/tmp.X2OCifMZXp + return 0 + desc 'create namespace demand-backup-fs-21007' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-fs-21007 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-fs-21007 ++ mktemp 
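# --- editor's note: create_namespace (run once for psmdb-operator above and
# again here for the test namespace) follows a destroy/delete/wait/create
# sequence so reruns start from a clean slate. Hedged sketch from the trace;
# the real helper may differ in details:
#
#   create_namespace() {
#       local namespace=$1
#       destroy_chaos_mesh   # best-effort webhook/CRD/RBAC cleanup, errors ignored
#       kubectl get ns \
#           | grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' \
#           | awk '{print$1}' | xargs kubectl delete ns
#       kubectl delete namespace "$namespace" --ignore-not-found
#       kubectl wait --for=delete namespace "$namespace"
#       kubectl create namespace "$namespace"
#   }
#
# When nothing matches the filter, xargs runs "kubectl delete ns" with no
# names at all, which explains the harmless "error: resource(s) were
# provided, but no name was specified" lines in this part of the log.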
+ local LAST_OUT=/tmp/tmp.0tEKddaVF9 ++ mktemp + local LAST_ERR=/tmp/tmp.EsjllnWOk3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-fs-21007 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0tEKddaVF9 namespace/demand-backup-fs-21007 created + cat /tmp/tmp.EsjllnWOk3 + rm /tmp/tmp.0tEKddaVF9 /tmp/tmp.EsjllnWOk3 + return 0 + set_kube_ctx demand-backup-fs-21007 + local namespace=demand-backup-fs-21007 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.b02RSn6tQ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vJDjR9zBXt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b02RSn6tQ4 ++ cat /tmp/tmp.vJDjR9zBXt ++ rm /tmp/tmp.b02RSn6tQ4 /tmp/tmp.vJDjR9zBXt ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2226-1e37b178-2-cluster12 --namespace=demand-backup-fs-21007 ++ mktemp + local LAST_OUT=/tmp/tmp.I3IbM2mtTT ++ mktemp + local LAST_ERR=/tmp/tmp.OCSPV1cOb3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2226-1e37b178-2-cluster12 --namespace=demand-backup-fs-21007 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I3IbM2mtTT Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2226-1e37b178-2-cluster12" modified. + cat /tmp/tmp.OCSPV1cOb3 + rm /tmp/tmp.I3IbM2mtTT /tmp/tmp.OCSPV1cOb3 + return 0 + kubectl_bin delete ns storage ++ mktemp + local LAST_OUT=/tmp/tmp.jTd8cUopE9 ++ mktemp + local LAST_ERR=/tmp/tmp.axNuS9DlgE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.jTd8cUopE9 + cat /tmp/tmp.axNuS9DlgE Error from server (NotFound): namespaces "storage" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.jTd8cUopE9 + cat /tmp/tmp.axNuS9DlgE Error from server (NotFound): namespaces "storage" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.jTd8cUopE9 + cat /tmp/tmp.axNuS9DlgE Error from server (NotFound): namespaces "storage" not found + sleep 8 + cat /tmp/tmp.jTd8cUopE9 + cat /tmp/tmp.axNuS9DlgE Error from server (NotFound): namespaces "storage" not found + rm /tmp/tmp.jTd8cUopE9 /tmp/tmp.axNuS9DlgE + return 1 + : + [[ 1 != 1 ]] + uid=1001 + [[ -n '' ]] + log 'deploying NFS server' + set +o xtrace [2026-02-03T14:36:48+0000] deploying NFS server + deploy_nfs_server 1001 + local uid=1001 + kubectl_bin create namespace storage ++ mktemp + local LAST_OUT=/tmp/tmp.odwV8e34s0 ++ mktemp + local LAST_ERR=/tmp/tmp.gpfKG8nHnO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace storage + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.odwV8e34s0 namespace/storage created + cat /tmp/tmp.gpfKG8nHnO + rm /tmp/tmp.odwV8e34s0 /tmp/tmp.gpfKG8nHnO + return 0 + kubectl_bin apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/nfs-server.yml ++ mktemp + local LAST_OUT=/tmp/tmp.pre4wJbgNg ++ mktemp + local LAST_ERR=/tmp/tmp.bLnAeKse2F + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/nfs-server.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pre4wJbgNg serviceaccount/nfs-server created rolebinding.rbac.authorization.k8s.io/system:openshift:scc:privileged created persistentvolumeclaim/nfs-pvc created deployment.apps/nfs-server created service/nfs-service created + cat /tmp/tmp.bLnAeKse2F + rm /tmp/tmp.pre4wJbgNg /tmp/tmp.bLnAeKse2F + return 0 + sleep 5 ++ kubectl_bin get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2dwu0uWXxL +++ mktemp ++ local LAST_ERR=/tmp/tmp.sGI2RxWf60 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2dwu0uWXxL ++ cat /tmp/tmp.sGI2RxWf60 ++ rm /tmp/tmp.2dwu0uWXxL /tmp/tmp.sGI2RxWf60 ++ return 0 + local nfsPod=nfs-server-7654469b4d-r79lh ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QyNAPE9quW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rc1NncOYl3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QyNAPE9quW ++ cat /tmp/tmp.Rc1NncOYl3 ++ rm /tmp/tmp.QyNAPE9quW /tmp/tmp.Rc1NncOYl3 ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-7654469b4d-r79lh to start Running' + set +o xtrace [2026-02-03T14:37:00+0000] Waiting for nfs-server-7654469b4d-r79lh to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5DzQNrqFWf +++ mktemp ++ local LAST_ERR=/tmp/tmp.vagiGm0Dat ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5DzQNrqFWf ++ cat /tmp/tmp.vagiGm0Dat ++ rm /tmp/tmp.5DzQNrqFWf /tmp/tmp.vagiGm0Dat ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-7654469b4d-r79lh to start Running' + set +o xtrace [2026-02-03T14:37:02+0000] Waiting for nfs-server-7654469b4d-r79lh to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a7pmvtUo42 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Djnho5QAD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a7pmvtUo42 ++ cat /tmp/tmp.4Djnho5QAD ++ rm /tmp/tmp.a7pmvtUo42 /tmp/tmp.4Djnho5QAD ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-7654469b4d-r79lh to start Running' + set +o xtrace [2026-02-03T14:37:04+0000] Waiting for nfs-server-7654469b4d-r79lh to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.KaguG20fdS +++ mktemp ++ local LAST_ERR=/tmp/tmp.3d3XJ1NuEp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KaguG20fdS ++ cat /tmp/tmp.3d3XJ1NuEp ++ rm /tmp/tmp.KaguG20fdS /tmp/tmp.3d3XJ1NuEp ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-7654469b4d-r79lh to start Running' + set +o xtrace [2026-02-03T14:37:06+0000] Waiting for nfs-server-7654469b4d-r79lh to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DzPtoAAMZV +++ mktemp ++ local LAST_ERR=/tmp/tmp.gLeWDLjjVX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DzPtoAAMZV ++ cat /tmp/tmp.gLeWDLjjVX ++ rm /tmp/tmp.DzPtoAAMZV /tmp/tmp.gLeWDLjjVX ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-7654469b4d-r79lh to start Running' + set +o xtrace [2026-02-03T14:37:08+0000] Waiting for nfs-server-7654469b4d-r79lh to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SwnocMJ7A6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mSonfYEMdm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SwnocMJ7A6 ++ cat /tmp/tmp.mSonfYEMdm ++ rm /tmp/tmp.SwnocMJ7A6 /tmp/tmp.mSonfYEMdm ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-7654469b4d-r79lh to start Running' + set +o xtrace [2026-02-03T14:37:11+0000] Waiting for nfs-server-7654469b4d-r79lh to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7yoUweIPmy +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zi1Y8nPSKS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7yoUweIPmy ++ cat /tmp/tmp.Zi1Y8nPSKS ++ rm /tmp/tmp.7yoUweIPmy /tmp/tmp.Zi1Y8nPSKS ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-7654469b4d-r79lh to start Running' + set +o xtrace [2026-02-03T14:37:13+0000] Waiting for nfs-server-7654469b4d-r79lh to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZPeH64UdOC +++ mktemp ++ local LAST_ERR=/tmp/tmp.uMv9oqtI43 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-7654469b4d-r79lh -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZPeH64UdOC ++ cat /tmp/tmp.uMv9oqtI43 ++ rm /tmp/tmp.ZPeH64UdOC /tmp/tmp.uMv9oqtI43 ++ return 0 + [[ Running == \R\u\n\n\i\n\g ]] + kubectl_bin exec -n storage nfs-server-7654469b4d-r79lh -- mkdir 
/exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.vU0r00DXuZ ++ mktemp + local LAST_ERR=/tmp/tmp.zj5NJwfL7O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-7654469b4d-r79lh -- mkdir /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vU0r00DXuZ + cat /tmp/tmp.zj5NJwfL7O + rm /tmp/tmp.vU0r00DXuZ /tmp/tmp.zj5NJwfL7O + return 0 + kubectl_bin exec -n storage nfs-server-7654469b4d-r79lh -- chown 1001:1001 /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.iqbEThi5Zw ++ mktemp + local LAST_ERR=/tmp/tmp.KnTXVCxktG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-7654469b4d-r79lh -- chown 1001:1001 /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iqbEThi5Zw + cat /tmp/tmp.KnTXVCxktG + rm /tmp/tmp.iqbEThi5Zw /tmp/tmp.KnTXVCxktG + return 0 + log 'creating secrets and start client' + set +o xtrace [2026-02-03T14:37:18+0000] creating secrets and start client + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.kv73CWPqQp ++ mktemp + local LAST_ERR=/tmp/tmp.Etx9zLvYky + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kv73CWPqQp secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.Etx9zLvYky + rm /tmp/tmp.kv73CWPqQp /tmp/tmp.Etx9zLvYky + return 0 + [[ -n '' ]] + log 'creating PSMDB cluster some-name' + set +o xtrace [2026-02-03T14:37:20+0000] creating PSMDB cluster some-name + [[ 1 != 1 ]] + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.dgzjRKiK7F + /usr/sbin/sed -e s/NAME_SPACE/demand-backup-fs-21007/g + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2226-1e37b178"' ++ mktemp + local LAST_ERR=/tmp/tmp.npRevkFV83 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dgzjRKiK7F perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.npRevkFV83 + rm /tmp/tmp.dgzjRKiK7F /tmp/tmp.npRevkFV83 + return 0 + log 'wait for all 3 pods to start' + set +o xtrace [2026-02-03T14:37:22+0000] wait for all 3 pods to start + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + 
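# --- editor's note: apply_cluster (traced a few lines above) renders the CR
# through a yq/sed pipeline before applying it, pinning every image to this
# build. Reconstructed as a sequential pipeline from the expressions in the
# trace (the trace interleaves them, so ordering here is an assumption):
#
#   cat e2e-tests/demand-backup-fs/conf/some-name.yml \
#     | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
#     | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2226-1e37b178"' \
#     | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
#     | yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' \
#     | yq eval '.spec.upgradeOptions.apply="Never"' \
#     | sed -e s/NAME_SPACE/demand-backup-fs-21007/g \
#     | kubectl apply -f -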
let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready...................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WPd042o7Tv +++ mktemp ++ local LAST_ERR=/tmp/tmp.8gM0ZTlhJU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WPd042o7Tv ++ cat /tmp/tmp.8gM0ZTlhJU ++ rm /tmp/tmp.WPd042o7Tv /tmp/tmp.8gM0ZTlhJU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0WneetABkH +++ mktemp ++ local LAST_ERR=/tmp/tmp.0MqyN6qOqu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0WneetABkH ++ cat /tmp/tmp.0MqyN6qOqu ++ rm /tmp/tmp.0WneetABkH /tmp/tmp.0MqyN6qOqu ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9mpppBoAyG +++ mktemp ++ local LAST_ERR=/tmp/tmp.uUjZuLrKzj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9mpppBoAyG ++ cat /tmp/tmp.uUjZuLrKzj ++ rm /tmp/tmp.9mpppBoAyG /tmp/tmp.uUjZuLrKzj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........ + [[ 1 == 1 ]] + log 'checking if statefulset created with expected config' + set +o xtrace [2026-02-03T14:39:21+0000] checking if statefulset created with expected config + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.f0jzAtZltZ/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-fs-21007", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.OQ477si197 ++ mktemp + local LAST_ERR=/tmp/tmp.C40m7QvmAX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OQ477si197 + cat /tmp/tmp.C40m7QvmAX + rm /tmp/tmp.OQ477si197 /tmp/tmp.C40m7QvmAX + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.f0jzAtZltZ/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.f0jzAtZltZ/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.f0jzAtZltZ/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml /tmp/tmp.f0jzAtZltZ/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2026-02-03T14:39:22+0000] compare_kubectl: statefulset/some-name-rs0 OK + log 'creating user' + set +o xtrace [2026-02-03T14:39:22+0000] creating user + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-21007 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z8MWOp47v3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.l48nODAQC5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z8MWOp47v3 ++ cat /tmp/tmp.l48nODAQC5 ++ rm /tmp/tmp.Z8MWOp47v3 /tmp/tmp.l48nODAQC5 ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Hv9JPARc3x ++ mktemp + local LAST_ERR=/tmp/tmp.SmwvpN2z2B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Hv9JPARc3x Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ac70a90f-80dd-42f6-bc0a-bfefde7710b8") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.SmwvpN2z2B + rm /tmp/tmp.Hv9JPARc3x /tmp/tmp.SmwvpN2z2B + return 0 + sleep 2 + log 'write initial data' + set +o xtrace [2026-02-03T14:39:27+0000] write initial data + write_data 100500 '' + local x=100500 + local find_prefix= + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hfmgjj5liX +++ mktemp ++ local LAST_ERR=/tmp/tmp.zpvvQid4lW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hfmgjj5liX ++ cat /tmp/tmp.zpvvQid4lW ++ rm /tmp/tmp.hfmgjj5liX /tmp/tmp.zpvvQid4lW ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PMRdVItkKi ++ mktemp + local LAST_ERR=/tmp/tmp.sDIkv8UEMT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PMRdVItkKi Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d4bf611a-7324-4dd1-9c3f-76ac0fcd76f5") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.sDIkv8UEMT + rm /tmp/tmp.PMRdVItkKi /tmp/tmp.sDIkv8UEMT + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local postfix= + local suffix=.svc.cluster.local + local 
database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-02-03T14:39:30+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zFagTF1lp9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nYbDo5aUee ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zFagTF1lp9 ++ cat /tmp/tmp.nYbDo5aUee ++ rm /tmp/tmp.zFagTF1lp9 /tmp/tmp.nYbDo5aUee ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CTNCm2mMiM ++ mktemp + local LAST_ERR=/tmp/tmp.u6YNbR5Z3J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CTNCm2mMiM + cat /tmp/tmp.u6YNbR5Z3J + rm /tmp/tmp.CTNCm2mMiM /tmp/tmp.u6YNbR5Z3J + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.f0jzAtZltZ/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-02-03T14:39:10.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-02-03T14:39:19.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-02-03T14:39:21.000+0000 I listening for the commands + desc 'CASE 1: Logical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: Logical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical + run_backup nfs backup-nfs-logical logical + local storage=nfs + local 
backup_name=backup-nfs-logical + local type=logical + log 'running backup backup-nfs-logical' + set +o xtrace [2026-02-03T14:39:37+0000] running backup backup-nfs-logical + yq eval '.metadata.name = "backup-nfs-logical" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.KO6WuDQCrA ++ mktemp + local LAST_ERR=/tmp/tmp.yBEk4pCiZD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KO6WuDQCrA perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical created + cat /tmp/tmp.yBEk4pCiZD + rm /tmp/tmp.KO6WuDQCrA /tmp/tmp.yBEk4pCiZD + return 0 + wait_backup backup-nfs-logical + local backup_name=backup-nfs-logical + local target_state=ready + set +o xtrace waiting for backup-nfs-logical to reach ready state.......OK + run_recovery_check backup-nfs-logical some-name -2nd '' + local backup=backup-nfs-logical + local cluster=some-name + local find_prefix_before=-2nd + local find_prefix_after= + write_data 100501 -2nd + local x=100501 + local find_prefix=-2nd + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NW4LYlVryb +++ mktemp ++ local LAST_ERR=/tmp/tmp.7byxpaH5qT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NW4LYlVryb ++ cat /tmp/tmp.7byxpaH5qT ++ rm /tmp/tmp.NW4LYlVryb /tmp/tmp.7byxpaH5qT ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.W8gfgnXcdw ++ mktemp + local LAST_ERR=/tmp/tmp.5PNpO0wXpI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W8gfgnXcdw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3a4d0e54-5351-4525-84be-641615793579") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions 
+ run_recovery_check backup-nfs-logical some-name -2nd ''
+ local backup=backup-nfs-logical
+ local cluster=some-name
+ local find_prefix_before=-2nd
+ local find_prefix_after=
+ write_data 100501 -2nd
+ local x=100501
+ local find_prefix=-2nd
+ run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-21007
+ local driver=mongodb+srv
+ local suffix=.svc.cluster.local
+ local replica_set=rs0
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ local client_container=psmdb-client-86cb5d8484-jp9pb
+ kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("3a4d0e54-5351-4525-84be-641615793579") }
Percona Server for MongoDB server version: v8.0.17-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ return 0
+ compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 -2nd .svc.cluster.local myApp test
+ log 'running db.test.find() in myApp'
[2026-02-03T14:39:55+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local ''
+ grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service'
+ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
+ kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find-2nd.json /tmp/tmp.f0jzAtZltZ/find-2nd
+ run_restore backup-nfs-logical
+ local backup_name=backup-nfs-logical
+ local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/restore.yml
+ log 'running restore restore-backup-nfs-logical'
+ set +o xtrace
[2026-02-03T14:39:58+0000] running restore restore-backup-nfs-logical
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/restore.yml
+ /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-logical/'
+ /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-logical/'
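[note] run_restore renders restore.yml with two sed substitutions instead of yq. The manifest piped to kubectl apply below is therefore roughly (spec.clusterName again assumed from the template):

    apiVersion: psmdb.percona.com/v1
    kind: PerconaServerMongoDBRestore
    metadata:
      name: restore-backup-nfs-logical
    spec:
      clusterName: some-name   # assumed
      backupName: backup-nfs-logical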
+ kubectl_bin apply -f -
+ kubectl apply -f -
perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical created
+ return 0
+ wait_restore backup-nfs-logical some-name
+ local backup_name=backup-nfs-logical
+ local cluster_name=some-name
+ local target_state=ready
+ local wait_cluster_consistency=1
+ local wait_time=1780
+ local ok_if_ready=0
+ set +o xtrace
Waiting for the psmdb-restore/restore-backup-nfs-logical object to be created.OK
Waiting psmdb-restore/restore-backup-nfs-logical to reach state "ready" .OK after 0 minutes
+ [[ 1 -eq 1 ]]
+ wait_cluster_consistency some-name
+ local cluster_name=some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readiness'
waiting for cluster readiness++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ ready == \r\e\a\d\y ]]
+ echo .OK
.OK
+ compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 '' .svc.cluster.local myApp test
+ log 'running db.test.find() in myApp'
[2026-02-03T14:40:24+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local ''
+ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
+ grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service'
+ local client_container=psmdb-client-86cb5d8484-jp9pb
+ kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf
'\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.U1XD28Kyp9 ++ mktemp + local LAST_ERR=/tmp/tmp.Q2Blpbzib1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U1XD28Kyp9 + cat /tmp/tmp.Q2Blpbzib1 + rm /tmp/tmp.U1XD28Kyp9 /tmp/tmp.Q2Blpbzib1 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.f0jzAtZltZ/find + desc 'CASE 2: Logical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: Logical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical-pitr + run_backup nfs backup-nfs-logical-pitr logical + local storage=nfs + local backup_name=backup-nfs-logical-pitr + local type=logical + log 'running backup backup-nfs-logical-pitr' + set +o xtrace [2026-02-03T14:40:27+0000] running backup backup-nfs-logical-pitr + yq eval '.metadata.name = "backup-nfs-logical-pitr" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ZPW2O6Ac16 ++ mktemp + local LAST_ERR=/tmp/tmp.2ITXYa17u9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZPW2O6Ac16 perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr created + cat /tmp/tmp.2ITXYa17u9 + rm /tmp/tmp.ZPW2O6Ac16 /tmp/tmp.2ITXYa17u9 + return 0 + wait_backup backup-nfs-logical-pitr + local backup_name=backup-nfs-logical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-logical-pitr to reach ready state.......OK + write_data 100502 -3rd + local x=100502 + local find_prefix=-3rd + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E4I3CsOlNS +++ mktemp ++ local LAST_ERR=/tmp/tmp.tuXRSQdU9T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E4I3CsOlNS ++ cat /tmp/tmp.tuXRSQdU9T ++ rm /tmp/tmp.E4I3CsOlNS /tmp/tmp.tuXRSQdU9T ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PnSTq4heyt ++ mktemp + local LAST_ERR=/tmp/tmp.XLJmn3hEIG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PnSTq4heyt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("673f7a42-4c09-4292-894b-b75360bc5aae") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.XLJmn3hEIG + rm /tmp/tmp.PnSTq4heyt /tmp/tmp.XLJmn3hEIG + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-02-03T14:40:45+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xND0EDLJXD +++ mktemp ++ local LAST_ERR=/tmp/tmp.BfBoVPtBiV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xND0EDLJXD ++ cat /tmp/tmp.BfBoVPtBiV ++ rm /tmp/tmp.xND0EDLJXD /tmp/tmp.BfBoVPtBiV ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.p9ORsugxZP ++ mktemp + local LAST_ERR=/tmp/tmp.n5Hnrqcd7m + local 
exit_status=0
+ kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.f0jzAtZltZ/find-3rd
+ run_pitr_check backup-nfs-logical-pitr some-name -3rd
+ local backup=backup-nfs-logical-pitr
+ local cluster=some-name
+ local find_prefix=-3rd
+ wait_for_oplogs some-name
+ local cluster1=some-name
++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
++ jq '.backups.snapshot[0].restoreTo'
+ local backup_last_write=1770129634
+ local retries=0
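[note] wait_for_oplogs compares two timestamps taken from pbm status: the snapshot's restoreTo time (the last write covered by the base backup) and the end of the newest PITR oplog chunk. The two probes visible in the trace boil down to:

    last_write=$(kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
        | jq '.backups.snapshot[0].restoreTo')
    last_chunk=$(kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json \
        | jq '.backups.pitrChunks.pitrChunks | last | .range.end')

The loop below retries (up to 30 times, per the `-gt 30` guard) until last_chunk exceeds last_write.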
++ get_latest_oplog_chunk_ts some-name
+++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
+++ jq '.backups.pitrChunks.pitrChunks | last | .range.end'
++ echo null
+ last_chunk=null
+ [[ null -gt 1770129634 ]]
+ [[ 0 -gt 30 ]]
The same poll runs for retries 1 through 14: pbm status keeps reporting no PITR chunks, jq prints null, format_date fails with "date: invalid date '@null'", and the loop logs an empty chunk time before each 10-second sleep:
[2026-02-03T14:40:53+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:41:05+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:41:17+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:41:29+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:41:41+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:42:24+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:43:08+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:43:20+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:43:32+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:43:44+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:43:56+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:44:08+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:44:20+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
[2026-02-03T14:44:32+0000] Waiting for last oplog chunk () to be greater than last write (2026-02-03 14:40:34)
The polls before the 14:42:24 and 14:43:08 messages each lost their first kubectl exec attempt and were retried by the wrapper:
error: Internal error occurred: error sending request: Post "https://10.219.0.4:10250/exec/demand-backup-fs-21007/some-name-rs0-0/backup-agent?command=pbm&command=status&command=-o&command=json&error=1&output=1": dial timeout, backstop
On the fifteenth poll a chunk finally appears:
++ get_latest_oplog_chunk_ts some-name
+++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
+++ jq '.backups.pitrChunks.pitrChunks | last | .range.end'
++ echo 1770129874
+ last_chunk=1770129874
+ retries=15
[2026-02-03T14:44:44+0000] Waiting for last oplog chunk (2026-02-03 14:44:34) to be greater than last write (2026-02-03 14:40:34)
+ sleep 10
+ [[ 1770129874 -gt 1770129634 ]]
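[note] The repeated "date: invalid date '@null'" warnings above come from feeding jq's literal null straight into format_date while no PITR chunk exists yet. A guard along these lines would keep the log clean (a sketch, not the script's actual code):

    if [ "$last_chunk" != "null" ]; then
        format_date "$last_chunk"
    fi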
+++ get_latest_oplog_chunk_ts some-name
++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end'
+++ echo 1770129874
++ format_date 1770129874
++ echo 2026-02-03 14:44:34
+ local 'target_time=2026-02-03 14:44:34'
+ log 'dropping test collection'
+ set +o xtrace
[2026-02-03T14:44:56+0000] dropping test collection
+ run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-21007
+ local driver=mongodb+srv
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ local client_container=psmdb-client-86cb5d8484-jp9pb
+ kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
Percona Server for MongoDB shell version v4.4.29-28
connecting to:
mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("d48ec10e-475e-4ba7-a7ae-21b274cf403d") }
Percona Server for MongoDB server version: v8.0.17-6
WARNING: shell and server versions do not match
switched to db myApp
true
bye
+ return 0
+ log 'checking pitr... backup: backup-nfs-logical-pitr target: 2026-02-03 14:44:34'
+ set +o xtrace
[2026-02-03T14:44:58+0000] checking pitr... backup: backup-nfs-logical-pitr target: 2026-02-03 14:44:34
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/pitr.yml
+ yq eval '.metadata.name = "restore-backup-nfs-logical-pitr"'
+ yq eval '.spec.backupName = "backup-nfs-logical-pitr"'
+ yq eval '.spec.pitr.date = "2026-02-03 14:44:34"'
+ kubectl_bin apply -f -
+ kubectl apply -f -
perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical-pitr created
+ return 0
+ wait_restore backup-nfs-logical-pitr some-name
+ local target_state=ready
+ local wait_cluster_consistency=1
+ local wait_time=1780
+ set +o xtrace
Waiting for the psmdb-restore/restore-backup-nfs-logical-pitr object to be created.OK
Waiting psmdb-restore/restore-backup-nfs-logical-pitr to reach state "ready" .OK after 0 minutes
+ [[ 1 -eq 1 ]]
+ wait_cluster_consistency some-name
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readiness'
waiting for cluster readiness++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ ready == \r\e\a\d\y ]]
+ echo .OK
.OK
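[note] For the PiTR case the restore object is rendered with yq rather than sed. Per the expressions above it amounts to (spec.clusterName and spec.pitr.type are assumptions — only the name, backupName, and date are set explicitly in this trace; the rest lives in pitr.yml):

    apiVersion: psmdb.percona.com/v1
    kind: PerconaServerMongoDBRestore
    metadata:
      name: restore-backup-nfs-logical-pitr
    spec:
      clusterName: some-name   # assumed
      backupName: backup-nfs-logical-pitr
      pitr:
        type: date             # assumed from the template
        date: "2026-02-03 14:44:34"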
+ compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 -3rd .svc.cluster.local myApp test
+ log 'running db.test.find() in myApp'
[2026-02-03T14:45:26+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local ''
+ grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service'
+ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
+ local client_container=psmdb-client-86cb5d8484-jp9pb
+ kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.f0jzAtZltZ/find-3rd
+ desc 'CASE 3: Physical backup and restore'
+ set +o xtrace
-----------------------------------------------------------------------------------
CASE 3: Physical backup and restore
-----------------------------------------------------------------------------------
+ backup_name=backup-nfs-physical
+ run_backup nfs backup-nfs-physical physical
+ local storage=nfs
+ local backup_name=backup-nfs-physical
+ local type=physical
+ log 'running backup backup-nfs-physical'
+ set +o xtrace
[2026-02-03T14:45:28+0000] running backup backup-nfs-physical
+ yq eval '.metadata.name = "backup-nfs-physical" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/backup-nfs.yml
+ kubectl_bin apply -f -
+ kubectl apply -f -
perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical created
+ return 0
+ wait_backup backup-nfs-physical
+ local target_state=ready
+ set +o xtrace
waiting for backup-nfs-physical to reach ready state.......OK
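[note] CASE 3 reuses the same backup template as CASE 1; per the yq expression above, only the name and spec.type change in the rendered object:

    metadata:
      name: backup-nfs-physical
    spec:
      clusterName: some-name   # assumed from the template, as before
      storageName: nfs
      type: physical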
+ run_recovery_check backup-nfs-physical some-name -4th -3rd
+ local backup=backup-nfs-physical
+ local cluster=some-name
+ local find_prefix_before=-4th
+ local find_prefix_after=-3rd
+ write_data 100501 -4th
+ local x=100501
+ local find_prefix=-4th
+ run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-21007
+ local driver=mongodb+srv
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ local client_container=psmdb-client-86cb5d8484-jp9pb
+ kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("c7d2628b-70eb-4829-a7ee-d7162f150a5c") }
Percona Server for MongoDB server version: v8.0.17-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ return 0
+ compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 -4th .svc.cluster.local myApp test
+ log 'running db.test.find() in myApp'
[2026-02-03T14:45:46+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local ''
+ grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service'
+ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
+ local client_container=psmdb-client-86cb5d8484-jp9pb
+ kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ return 0
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find-4th.json /tmp/tmp.f0jzAtZltZ/find-4th
+ run_restore backup-nfs-physical
+ local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/restore.yml
+ log 'running restore restore-backup-nfs-physical'
+ set +o xtrace
[2026-02-03T14:45:49+0000] running restore restore-backup-nfs-physical
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/restore.yml
+ /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-physical/'
+ /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-physical/'
+ kubectl_bin apply -f -
+ kubectl apply -f -
perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical created
+ return 0
+ wait_restore backup-nfs-physical some-name
+ local target_state=ready
+ local wait_cluster_consistency=1
+ local wait_time=1780
+ set +o xtrace
Waiting for the psmdb-restore/restore-backup-nfs-physical object to be created.OK
Waiting psmdb-restore/restore-backup-nfs-physical to reach state "ready" .....OK after 4 minutes
+ [[ 1 -eq 1 ]]
+ wait_cluster_consistency some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readiness'
waiting for cluster readiness
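[note] wait_cluster_consistency polls the psmdb custom resource's status until it reports ready; after a physical restore the cluster cycles through initializing and error while the replset pods restart, which is why the wait below takes many iterations. The polling reduces to (a sketch of the loop, not its literal code):

    until [ "$(kubectl get psmdb some-name -o jsonpath='{.status.state}')" = "ready" ]; do
        sleep 10
    done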
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ initializing == \r\e\a\d\y ]]
+ let retry+=1
+ '[' 1 -ge 32 ']'
+ echo -n .
.
+ sleep 10
The same poll repeats once per 10-second sleep, printing one dot per attempt: the cluster reports "initializing" for retries 2 and 3, "error" for retries 4 through 8 while the pods come back from the physical restore, then "initializing" again for retry 9, until the tenth check:
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ initializing == \r\e\a\d\y ]]
+ let retry+=1
+ '[' 10 -ge 32 ']'
+ echo -n .
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qXhviy4vP1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.42mwT1j8db ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qXhviy4vP1 ++ cat /tmp/tmp.42mwT1j8db ++ rm /tmp/tmp.qXhviy4vP1 /tmp/tmp.42mwT1j8db ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-02-03T14:51:59+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xAoDMd38gq +++ mktemp ++ local LAST_ERR=/tmp/tmp.JWywunoSNk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xAoDMd38gq ++ cat /tmp/tmp.JWywunoSNk ++ rm /tmp/tmp.xAoDMd38gq /tmp/tmp.JWywunoSNk ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fuvtKW7zk6 ++ mktemp + local LAST_ERR=/tmp/tmp.YR9bGjnL4x + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fuvtKW7zk6 + cat /tmp/tmp.YR9bGjnL4x + rm /tmp/tmp.fuvtKW7zk6 /tmp/tmp.YR9bGjnL4x + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.f0jzAtZltZ/find-3rd + desc 'CASE 4: Physical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 4: Physical backup and PiTR 
----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical-pitr + run_backup nfs backup-nfs-physical-pitr physical + local storage=nfs + local backup_name=backup-nfs-physical-pitr + local type=physical + log 'running backup backup-nfs-physical-pitr' + set +o xtrace [2026-02-03T14:52:02+0000] running backup backup-nfs-physical-pitr + yq eval '.metadata.name = "backup-nfs-physical-pitr" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.oNCx0qI9OH ++ mktemp + local LAST_ERR=/tmp/tmp.nHAipAKbp8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oNCx0qI9OH perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr created + cat /tmp/tmp.nHAipAKbp8 + rm /tmp/tmp.oNCx0qI9OH /tmp/tmp.nHAipAKbp8 + return 0 + wait_backup backup-nfs-physical-pitr + local backup_name=backup-nfs-physical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-physical-pitr to reach ready state.......OK + write_data 100503 -5th + local x=100503 + local find_prefix=-5th + run_mongo 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rZIFsnCClz +++ mktemp ++ local LAST_ERR=/tmp/tmp.r0XjyGwtY3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rZIFsnCClz ++ cat /tmp/tmp.r0XjyGwtY3 ++ rm /tmp/tmp.rZIFsnCClz /tmp/tmp.r0XjyGwtY3 ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0EmzV9lpg7 ++ mktemp + local LAST_ERR=/tmp/tmp.Xuwcj0xHII + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0EmzV9lpg7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9c8252fc-802e-434b-8785-4e94d1f43fe4") } Percona Server 
for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Xuwcj0xHII + rm /tmp/tmp.0EmzV9lpg7 /tmp/tmp.Xuwcj0xHII + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-02-03T14:52:19+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FcB8FLxLBn +++ mktemp ++ local LAST_ERR=/tmp/tmp.FWYVviv3ut ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FcB8FLxLBn ++ cat /tmp/tmp.FWYVviv3ut ++ rm /tmp/tmp.FcB8FLxLBn /tmp/tmp.FWYVviv3ut ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.pzioaS0ftk ++ mktemp + local LAST_ERR=/tmp/tmp.m91Mzx322e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pzioaS0ftk + cat /tmp/tmp.m91Mzx322e + rm /tmp/tmp.pzioaS0ftk /tmp/tmp.m91Mzx322e + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.f0jzAtZltZ/find-5th + run_pitr_check backup-nfs-physical-pitr some-name -5th + local backup=backup-nfs-physical-pitr + local cluster=some-name + local find_prefix=-5th + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VxjRSi3Bhh +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pm329EUXHU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ 
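# --- editor's note: how the CASE 4 backup object was rendered ---
# The backup-nfs-physical-pitr object created a few steps back is just the
# shared conf/backup-nfs.yml template with three fields overridden via yq and
# piped to kubectl apply. Sketch: the yq expression is verbatim from the
# trace; the wrapper name run_backup matches the traced function, but its
# exact body and the ${test_dir} variable (standing for the
# e2e-tests/demand-backup-fs directory) are assumptions.
run_backup() {
    local storage=$1 backup_name=$2 type=$3
    yq eval "
        .metadata.name = \"${backup_name}\" |
        .spec.storageName = \"${storage}\" |
        .spec.type = \"${type}\"" \
        "${test_dir}/conf/backup-nfs.yml" \
        | kubectl apply -f -
}
# e.g. run_backup nfs backup-nfs-physical-pitr physical, as invoked above.
# --- trace continues ---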
set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VxjRSi3Bhh ++ cat /tmp/tmp.Pm329EUXHU ++ rm /tmp/tmp.VxjRSi3Bhh /tmp/tmp.Pm329EUXHU ++ return 0 + local backup_last_write=1770130326 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Dga4vwj4SV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OB1BgG0mUd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Dga4vwj4SV +++ cat /tmp/tmp.OB1BgG0mUd +++ rm /tmp/tmp.Dga4vwj4SV /tmp/tmp.OB1BgG0mUd +++ return 0 ++ echo 1770129904 + local last_chunk=1770129904 + [[ 1770129904 -gt 1770130326 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.puvlS8fvMv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oLu6WAEuyL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.puvlS8fvMv +++ cat /tmp/tmp.oLu6WAEuyL +++ rm /tmp/tmp.puvlS8fvMv /tmp/tmp.oLu6WAEuyL +++ return 0 ++ echo 1770129904 + last_chunk=1770129904 + retries=1 ++ format_date 1770129904 ++ local timestamp=1770129904 +++ TZ=UTC +++ /usr/sbin/date -d@1770129904 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:45:04 ++ format_date 1770130326 ++ local timestamp=1770130326 +++ TZ=UTC +++ /usr/sbin/date -d@1770130326 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:52:06 + log 'Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06)' + set +o xtrace [2026-02-03T14:52:28+0000] Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06) + sleep 10 + [[ 1770129904 -gt 1770130326 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wMsnZMcNfN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.M9Xa0SQDMf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wMsnZMcNfN +++ cat /tmp/tmp.M9Xa0SQDMf +++ rm /tmp/tmp.wMsnZMcNfN /tmp/tmp.M9Xa0SQDMf +++ return 0 ++ echo 1770129904 + last_chunk=1770129904 + retries=2 ++ format_date 1770129904 ++ local timestamp=1770129904 +++ TZ=UTC +++ /usr/sbin/date -d@1770129904 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:45:04 ++ format_date 1770130326 ++ local timestamp=1770130326 +++ TZ=UTC +++ /usr/sbin/date -d@1770130326 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:52:06 + log 'Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06)' + set +o xtrace [2026-02-03T14:52:40+0000] Waiting for last 
oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06) + sleep 10 + [[ 1770129904 -gt 1770130326 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lqu7M8W3gf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nEJF9CVYMi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lqu7M8W3gf +++ cat /tmp/tmp.nEJF9CVYMi +++ rm /tmp/tmp.lqu7M8W3gf /tmp/tmp.nEJF9CVYMi +++ return 0 ++ echo 1770129904 + last_chunk=1770129904 + retries=3 ++ format_date 1770129904 ++ local timestamp=1770129904 +++ TZ=UTC +++ /usr/sbin/date -d@1770129904 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:45:04 ++ format_date 1770130326 ++ local timestamp=1770130326 +++ TZ=UTC +++ /usr/sbin/date -d@1770130326 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:52:06 + log 'Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06)' + set +o xtrace [2026-02-03T14:52:52+0000] Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06) + sleep 10 + [[ 1770129904 -gt 1770130326 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RjglhUAqDz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4de8Vw6j2K +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RjglhUAqDz +++ cat /tmp/tmp.4de8Vw6j2K +++ rm /tmp/tmp.RjglhUAqDz /tmp/tmp.4de8Vw6j2K +++ return 0 ++ echo 1770129904 + last_chunk=1770129904 + retries=4 ++ format_date 1770129904 ++ local timestamp=1770129904 +++ TZ=UTC +++ /usr/sbin/date -d@1770129904 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:45:04 ++ format_date 1770130326 ++ local timestamp=1770130326 +++ TZ=UTC +++ /usr/sbin/date -d@1770130326 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:52:06 + log 'Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06)' + set +o xtrace [2026-02-03T14:53:03+0000] Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06) + sleep 10 + [[ 1770129904 -gt 1770130326 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WcJEFctAum ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4yIseHqEOM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WcJEFctAum +++ cat /tmp/tmp.4yIseHqEOM +++ rm /tmp/tmp.WcJEFctAum /tmp/tmp.4yIseHqEOM +++ return 0 ++ echo 1770129904 + last_chunk=1770129904 + retries=5 ++ format_date 1770129904 ++ local timestamp=1770129904 
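# --- editor's note: the two helpers this oplog wait keeps expanding ---
# wait_for_oplogs compares the backup's last write (.backups.snapshot[0].restoreTo
# from `pbm status -o json`, 1770130326 here) against the end of the newest PITR
# chunk, re-reading the chunk timestamp every 10s for at most 30 retries. The jq
# paths and the UTC date rendering are verbatim from the trace; the function
# wrappers themselves are reconstructions.
get_latest_oplog_chunk_ts() {
    local cluster=$1
    kubectl exec "${cluster}-rs0-0" -c backup-agent -- pbm status -o json \
        | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
}
format_date() {
    TZ=UTC date -d "@$1" '+%Y-%m-%d %H:%M:%S'
}
# --- trace continues ---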
+++ TZ=UTC +++ /usr/sbin/date -d@1770129904 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:45:04 ++ format_date 1770130326 ++ local timestamp=1770130326 +++ TZ=UTC +++ /usr/sbin/date -d@1770130326 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:52:06 + log 'Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06)' + set +o xtrace [2026-02-03T14:53:15+0000] Waiting for last oplog chunk (2026-02-03 14:45:04) to be greater than last write (2026-02-03 14:52:06) + sleep 10 + [[ 1770129904 -gt 1770130326 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Qsjr9pIS3G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.udMtLNOByK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Qsjr9pIS3G +++ cat /tmp/tmp.udMtLNOByK +++ rm /tmp/tmp.Qsjr9pIS3G /tmp/tmp.udMtLNOByK +++ return 0 ++ echo 1770130396 + last_chunk=1770130396 + retries=6 ++ format_date 1770130396 ++ local timestamp=1770130396 +++ TZ=UTC +++ /usr/sbin/date -d@1770130396 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:53:16 ++ format_date 1770130326 ++ local timestamp=1770130326 +++ TZ=UTC +++ /usr/sbin/date -d@1770130326 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:52:06 + log 'Waiting for last oplog chunk (2026-02-03 14:53:16) to be greater than last write (2026-02-03 14:52:06)' + set +o xtrace [2026-02-03T14:53:27+0000] Waiting for last oplog chunk (2026-02-03 14:53:16) to be greater than last write (2026-02-03 14:52:06) + sleep 10 + [[ 1770130396 -gt 1770130326 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.UEBKwoxMkt +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9Ksid5DrD4 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.UEBKwoxMkt ++++ cat /tmp/tmp.9Ksid5DrD4 ++++ rm /tmp/tmp.UEBKwoxMkt /tmp/tmp.9Ksid5DrD4 ++++ return 0 +++ echo 1770130396 ++ format_date 1770130396 ++ local timestamp=1770130396 +++ TZ=UTC +++ /usr/sbin/date -d@1770130396 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-02-03 14:53:16 + local 'target_time=2026-02-03 14:53:16' + log 'dropping test collection' + set +o xtrace [2026-02-03T14:53:39+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1NIHpIVuaQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bwmz48gCpc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1NIHpIVuaQ ++ cat /tmp/tmp.Bwmz48gCpc ++ rm /tmp/tmp.1NIHpIVuaQ /tmp/tmp.Bwmz48gCpc ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.eX4Vx0JWRc ++ mktemp + local LAST_ERR=/tmp/tmp.BrrN4maEgX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eX4Vx0JWRc Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-21007.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4ca43c3f-6121-42d2-8217-08f2fba8e8d3") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.BrrN4maEgX + rm /tmp/tmp.eX4Vx0JWRc /tmp/tmp.BrrN4maEgX + return 0 + log 'checking pitr... backup: backup-nfs-physical-pitr target: 2026-02-03 14:53:16' + set +o xtrace [2026-02-03T14:53:42+0000] checking pitr... 
backup: backup-nfs-physical-pitr target: 2026-02-03 14:53:16 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-physical-pitr"' + yq eval '.spec.backupName = "backup-nfs-physical-pitr"' + yq eval '.spec.pitr.date = "2026-02-03 14:53:16"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jl3QPbp9Mi ++ mktemp + local LAST_ERR=/tmp/tmp.cPnOd8qRNd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jl3QPbp9Mi perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical-pitr created + cat /tmp/tmp.cPnOd8qRNd + rm /tmp/tmp.jl3QPbp9Mi /tmp/tmp.cPnOd8qRNd + return 0 + wait_restore backup-nfs-physical-pitr some-name + local backup_name=backup-nfs-physical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical-pitr object to be created.OK Waiting psmdb-restore/restore-backup-nfs-physical-pitr to reach state "ready" .....OK after 4 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I8zZib266Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.B5MaM6Zp58 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I8zZib266Y ++ cat /tmp/tmp.B5MaM6Zp58 ++ rm /tmp/tmp.I8zZib266Y /tmp/tmp.B5MaM6Zp58 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wDY9WQTuiv +++ mktemp ++ local LAST_ERR=/tmp/tmp.rVcv9uaDOU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wDY9WQTuiv ++ cat /tmp/tmp.rVcv9uaDOU ++ rm /tmp/tmp.wDY9WQTuiv /tmp/tmp.rVcv9uaDOU ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CUMZWUWsjZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.M7AVZ0oTyl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CUMZWUWsjZ ++ cat /tmp/tmp.M7AVZ0oTyl ++ rm /tmp/tmp.CUMZWUWsjZ /tmp/tmp.M7AVZ0oTyl ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . 
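# --- editor's note: the restore object whose readiness is being polled ---
# The perconaservermongodbrestore applied above is conf/pitr.yml with the
# name, backupName, and PITR target date patched in, one yq eval per field,
# exactly as the pipeline in the trace shows. Sketch: run_pitr_restore is a
# hypothetical wrapper name and ${test_dir} an assumed variable; the yq
# expressions are verbatim.
run_pitr_restore() {
    local backup=$1 target_time=$2
    cat "${test_dir}/conf/pitr.yml" \
        | yq eval ".metadata.name = \"restore-${backup}\"" \
        | yq eval ".spec.backupName = \"${backup}\"" \
        | yq eval ".spec.pitr.date = \"${target_time}\"" \
        | kubectl apply -f -
}
# e.g. run_pitr_restore backup-nfs-physical-pitr "2026-02-03 14:53:16"
# --- trace continues ---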
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qQ03VCd1uo +++ mktemp ++ local LAST_ERR=/tmp/tmp.YmAUPw6fpn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qQ03VCd1uo ++ cat /tmp/tmp.YmAUPw6fpn ++ rm /tmp/tmp.qQ03VCd1uo /tmp/tmp.YmAUPw6fpn ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YHzxMAICnM +++ mktemp ++ local LAST_ERR=/tmp/tmp.CoNYxsZ9AT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YHzxMAICnM ++ cat /tmp/tmp.CoNYxsZ9AT ++ rm /tmp/tmp.YHzxMAICnM /tmp/tmp.CoNYxsZ9AT ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Eu4Fv0pO5k +++ mktemp ++ local LAST_ERR=/tmp/tmp.57edhStQ6b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Eu4Fv0pO5k ++ cat /tmp/tmp.57edhStQ6b ++ rm /tmp/tmp.Eu4Fv0pO5k /tmp/tmp.57edhStQ6b ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2RYaOcbcIJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.YQaFiqifqk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2RYaOcbcIJ ++ cat /tmp/tmp.YQaFiqifqk ++ rm /tmp/tmp.2RYaOcbcIJ /tmp/tmp.YQaFiqifqk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . 
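# --- editor's note: the normalization behind the upcoming diff ---
# compare_mongo_cmd only diffs cleanly against the golden find-5th.json
# because the raw mongo shell output is first stripped of connection noise
# and volatile identifiers (ObjectIds, pod ordinals in hostnames). Both
# filters below are copied verbatim from the trace (note the grep pattern
# lists 'Error saving history file' twice); wrapping them in a function is
# the editor's addition.
normalize_mongo_output() {
    grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
}
# --- trace continues ---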
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t9BaDPaJhu +++ mktemp ++ local LAST_ERR=/tmp/tmp.trVyiROHil ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t9BaDPaJhu ++ cat /tmp/tmp.trVyiROHil ++ rm /tmp/tmp.t9BaDPaJhu /tmp/tmp.trVyiROHil ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-21007 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-02-03T15:00:03+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-21007 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-21007 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-21007 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u5bSfQPsYL +++ mktemp ++ local LAST_ERR=/tmp/tmp.l2OHUXgw8L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u5bSfQPsYL ++ cat /tmp/tmp.l2OHUXgw8L ++ rm /tmp/tmp.u5bSfQPsYL /tmp/tmp.l2OHUXgw8L ++ return 0 + local client_container=psmdb-client-86cb5d8484-jp9pb + kubectl_bin exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CuTpx2WGg9 ++ mktemp + local LAST_ERR=/tmp/tmp.aQIgAb98ic + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-86cb5d8484-jp9pb -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-21007.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CuTpx2WGg9 + cat /tmp/tmp.aQIgAb98ic + rm /tmp/tmp.CuTpx2WGg9 /tmp/tmp.aQIgAb98ic + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.f0jzAtZltZ/find-5th + destroy demand-backup-fs-21007 + local namespace=demand-backup-fs-21007 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set 
+o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.9chopTHhnn +++ mktemp ++ local LAST_ERR=/tmp/tmp.PAnljvF3sK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9chopTHhnn ++ cat /tmp/tmp.PAnljvF3sK ++ rm /tmp/tmp.9chopTHhnn /tmp/tmp.PAnljvF3sK ++ return 0 + '[' 4 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.YUM7Ay7JQC ++ mktemp + local LAST_ERR=/tmp/tmp.3Z8MiqPigM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YUM7Ay7JQC NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-nfs-logical some-name nfs /mnt/nfs/2026-02-03T14:39:40Z logical 39.49KB ready 20m 20m backup-nfs-logical-pitr some-name nfs /mnt/nfs/2026-02-03T14:40:29Z logical 43.33KB ready 19m 19m backup-nfs-physical some-name nfs /mnt/nfs/2026-02-03T14:45:31Z physical 1.52MB ready 14m 14m backup-nfs-physical-pitr some-name nfs /mnt/nfs/2026-02-03T14:52:04Z physical 907.50KB ready 7m55s 8m3s + cat /tmp/tmp.3Z8MiqPigM + rm /tmp/tmp.YUM7Ay7JQC /tmp/tmp.3Z8MiqPigM + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.n8O8WJB4MZ ++ mktemp + local LAST_ERR=/tmp/tmp.nkqSMOLEsp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n8O8WJB4MZ perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical" deleted from demand-backup-fs-21007 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical-pitr" deleted from demand-backup-fs-21007 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical" deleted from demand-backup-fs-21007 namespace perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical-pitr" deleted from demand-backup-fs-21007 namespace + cat /tmp/tmp.nkqSMOLEsp + rm /tmp/tmp.n8O8WJB4MZ /tmp/tmp.nkqSMOLEsp + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.BpUuLfWrAh ++ mktemp + local LAST_ERR=/tmp/tmp.NYSeyReWU4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BpUuLfWrAh customresourcedefinition.apiextensions.k8s.io 
"perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.NYSeyReWU4 + rm /tmp/tmp.BpUuLfWrAh /tmp/tmp.NYSeyReWU4 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.2bvDQFHRzH ++ mktemp + local LAST_ERR=/tmp/tmp.MT37haS5uN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2bvDQFHRzH + cat /tmp/tmp.MT37haS5uN + rm /tmp/tmp.2bvDQFHRzH /tmp/tmp.MT37haS5uN + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.TvbLSfRNZp ++ mktemp + local LAST_ERR=/tmp/tmp.Y2yajZ9aD9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TvbLSfRNZp + cat /tmp/tmp.Y2yajZ9aD9 + rm /tmp/tmp.TvbLSfRNZp /tmp/tmp.Y2yajZ9aD9 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-21007 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.9GNvtj4t7X ++ mktemp + local LAST_ERR=/tmp/tmp.0itywWFs44 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + 
kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9GNvtj4t7X customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.0itywWFs44 + rm /tmp/tmp.9GNvtj4t7X /tmp/tmp.0itywWFs44 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.6pKAmdvA6b ++ mktemp + local LAST_ERR=/tmp/tmp.PlZO1XLAlO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2226/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6pKAmdvA6b clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.PlZO1XLAlO + rm /tmp/tmp.6pKAmdvA6b /tmp/tmp.PlZO1XLAlO + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.AI057yEq9j ++ mktemp + local LAST_ERR=/tmp/tmp.YhhmDKFEVr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.AI057yEq9j + cat /tmp/tmp.YhhmDKFEVr Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.AI057yEq9j + cat /tmp/tmp.YhhmDKFEVr Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.AI057yEq9j + cat /tmp/tmp.YhhmDKFEVr Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.AI057yEq9j + cat /tmp/tmp.YhhmDKFEVr Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io 
"orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server 
+ rm /tmp/tmp.AI057yEq9j /tmp/tmp.YhhmDKFEVr
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-fs-21007
+ rm -rf /tmp/tmp.f0jzAtZltZ
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.re77VuPKWB
+ local LAST_OUT=/tmp/tmp.WaipceG220
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.Rz03oKBg08
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.vDfccGoj6i
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace demand-backup-fs-21007
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
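
Note on the NotFound flood above: every object in the cert-manager release manifest was already gone, and plain kubectl delete treats each missing object as an error, so the wrapper burns its retries and still returns 1, which the trace then absorbs with a bare true (an "|| true" guard). A minimal sketch of an idempotent teardown follows, assuming the same release URL; the function name delete_cert_manager is hypothetical, not a helper from this suite:

    # Hypothetical helper (not from this test suite): delete the cert-manager
    # release idempotently. --ignore-not-found turns missing-object errors into
    # no-ops, so a clean cluster no longer costs two retries and a nonzero exit.
    delete_cert_manager() {
        local manifest='https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml'
        kubectl delete -f "$manifest" --ignore-not-found --wait=false
    }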
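The kubectl_bin fragments throughout the trace (paired mktemp calls, seq 0 2, sleep 4 then sleep 8, the final cat/rm/return) imply a retry wrapper roughly like the sketch below. This is a reconstruction inferred from the xtrace output, not the suite's actual code; in particular the doubling backoff is an assumption based on the two observed sleeps:

    # Reconstructed retry wrapper; names and structure are inferred from the trace.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -ne 0 ]; then
                cat "$LAST_OUT" "$LAST_ERR"
                sleep $((timeout * 2 ** i))   # yields 4s then 8s, matching the trace
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }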
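The closing trace interleaves two sets of LAST_OUT/LAST_ERR assignments and two delete commands, which suggests the test namespace and the operator namespace are force-deleted concurrently. A plausible shape for that cleanup is sketched below; the backgrounding with & and wait is an inference from the interleaved output, not something the log states:

    # Assumed parallel cleanup; --grace-period=0 --force skips graceful
    # termination so teardown does not wait on pod shutdown.
    kubectl delete --grace-period=0 --force=true namespace demand-backup-fs-21007 &
    kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
    wait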