Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/logs/demand-backup-fs.log Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=some-name + create_infra demand-backup-fs-20300 + local ns=demand-backup-fs-20300 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.8zssaKG1T3 ++ mktemp + local LAST_ERR=/tmp/tmp.jzF2pnsOau + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8zssaKG1T3 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.jzF2pnsOau + rm /tmp/tmp.8zssaKG1T3 /tmp/tmp.jzF2pnsOau + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-11679 backup-nfs-logical --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-11679 backup-nfs-logical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-11679 backup-nfs-physical --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical patched + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-fs-11679 backup-nfs-physical-pitr --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.GjAR6CyTu2 ++ mktemp + local LAST_ERR=/tmp/tmp.2RhDLFxUqJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1
']' + break + cat /tmp/tmp.GjAR6CyTu2 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.2RhDLFxUqJ + rm /tmp/tmp.GjAR6CyTu2 /tmp/tmp.2RhDLFxUqJ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3oe7TA0OkZ ++ mktemp + local LAST_ERR=/tmp/tmp.HBq7avYDeA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3oe7TA0OkZ + cat /tmp/tmp.HBq7avYDeA + rm /tmp/tmp.3oe7TA0OkZ /tmp/tmp.HBq7avYDeA + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-11679 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.xdzLoaRK2a ++ mktemp + local LAST_ERR=/tmp/tmp.iypmpdrGmv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xdzLoaRK2a customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.iypmpdrGmv + rm /tmp/tmp.xdzLoaRK2a /tmp/tmp.iypmpdrGmv + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.y8j9xRtHcC ++ mktemp + local LAST_ERR=/tmp/tmp.bHkrAxkctC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y8j9xRtHcC clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.bHkrAxkctC + rm /tmp/tmp.y8j9xRtHcC /tmp/tmp.bHkrAxkctC + return 0 + check_crd_for_deletion PR-2205-44b3f99f + local git_tag=PR-2205-44b3f99f ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2205-44b3f99f/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g 
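# delete_crd, traced above: for every CRD named in deploy/crd.yaml, strip finalizers from any
# leftover custom resources, delete the CRD, and wait for it to be gone. A condensed sketch,
# assuming src_dir points at the repo checkout; note that quoting '---' without backslashes
# (grep -v -- '---') avoids the "stray \ before -" grep warnings printed above:
for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
  kubectl get "$crd_name" --all-namespaces --no-headers \
    -o custom-columns=NS:.metadata.namespace,NAME:.metadata.name 2>/dev/null \
    | while read -r ns name; do
        kubectl patch "$crd_name" -n "$ns" "$name" --type=merge -p '{"metadata":{"finalizers":[]}}'
      done
  kubectl delete crd "$crd_name" --ignore-not-found --wait=false
  kubectl wait --for=delete crd "$crd_name" --timeout=60s || true
done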
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ADeBEYFL9n +++ mktemp ++ local LAST_ERR=/tmp/tmp.DZ4SMxZBIa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ADeBEYFL9n ++ cat /tmp/tmp.DZ4SMxZBIa Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ADeBEYFL9n ++ cat /tmp/tmp.DZ4SMxZBIa Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ADeBEYFL9n ++ cat /tmp/tmp.DZ4SMxZBIa Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.ADeBEYFL9n ++ cat /tmp/tmp.DZ4SMxZBIa Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.ADeBEYFL9n /tmp/tmp.DZ4SMxZBIa ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned 
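# kubectl_bin, used throughout this trace, retries a kubectl call up to three times with a
# growing sleep (0s, 4s, 8s in the crd/null attempts above). A condensed re-implementation,
# assuming those retry parameters; kubectl_retry is a hypothetical name for the sketch:
kubectl_retry() {
  local out err
  out=$(mktemp) err=$(mktemp)
  for delay in 0 4 8; do
    if kubectl "$@" >"$out" 2>"$err"; then
      cat "$out"; rm -f "$out" "$err"; return 0
    fi
    sleep "$delay"
  done
  cat "$out" "$err"; rm -f "$out" "$err"; return 1
}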
up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.b8jsfgwjUE + local LAST_OUT=/tmp/tmp.1qUjp3OqY9 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.NC1jUkREdB + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.u7iU7q9P0i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b8jsfgwjUE + cat /tmp/tmp.u7iU7q9P0i + rm /tmp/tmp.b8jsfgwjUE /tmp/tmp.u7iU7q9P0i + return 0 namespace "demand-backup-fs-11679" deleted namespace "storage" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1qUjp3OqY9 namespace "psmdb-operator" deleted + cat /tmp/tmp.NC1jUkREdB + rm /tmp/tmp.1qUjp3OqY9 /tmp/tmp.NC1jUkREdB + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.tDWaqjcFza ++ mktemp + local LAST_ERR=/tmp/tmp.XHmWIK5NOx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tDWaqjcFza + cat /tmp/tmp.XHmWIK5NOx + rm /tmp/tmp.tDWaqjcFza /tmp/tmp.XHmWIK5NOx + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.9lp9y30kd4 ++ mktemp + local LAST_ERR=/tmp/tmp.JkiMErwm8a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9lp9y30kd4 namespace/psmdb-operator created + cat /tmp/tmp.JkiMErwm8a + rm /tmp/tmp.9lp9y30kd4 /tmp/tmp.JkiMErwm8a + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.pT0QCugNrI +++ mktemp ++ local LAST_ERR=/tmp/tmp.rzd4X4C4qb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pT0QCugNrI ++ cat /tmp/tmp.rzd4X4C4qb ++ rm /tmp/tmp.pT0QCugNrI /tmp/tmp.rzd4X4C4qb ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster12 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.w1FQ2d7zgt ++ mktemp + local LAST_ERR=/tmp/tmp.PNL8wugXAL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context 
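# create_namespace, traced above, boils down to "delete, wait until gone, recreate, switch the
# kubeconfig context". A minimal equivalent of the psmdb-operator namespace handling:
kubectl delete namespace psmdb-operator --ignore-not-found
kubectl wait --for=delete namespace psmdb-operator --timeout=120s || true
kubectl create namespace psmdb-operator
kubectl config set-context "$(kubectl config current-context)" --namespace=psmdb-operator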
gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster12 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w1FQ2d7zgt Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster12" modified. + cat /tmp/tmp.PNL8wugXAL + rm /tmp/tmp.w1FQ2d7zgt /tmp/tmp.PNL8wugXAL + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.XL8TJAZkdF ++ mktemp + local LAST_ERR=/tmp/tmp.uF42bIslOr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XL8TJAZkdF customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.uF42bIslOr + rm /tmp/tmp.XL8TJAZkdF /tmp/tmp.uF42bIslOr + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.AHKCduVjv1 ++ mktemp + local LAST_ERR=/tmp/tmp.y2eOmzfqFb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AHKCduVjv1 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.y2eOmzfqFb + rm /tmp/tmp.AHKCduVjv1 /tmp/tmp.y2eOmzfqFb + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.nJbNyBJFtP ++ mktemp + local LAST_ERR=/tmp/tmp.tbJ7nHp7lM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nJbNyBJFtP deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.tbJ7nHp7lM + rm /tmp/tmp.nJbNyBJFtP /tmp/tmp.tbJ7nHp7lM + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ydfm5WbpTx +++ mktemp ++ local LAST_ERR=/tmp/tmp.i3taXUjRGW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ydfm5WbpTx ++ cat /tmp/tmp.i3taXUjRGW ++ rm /tmp/tmp.ydfm5WbpTx /tmp/tmp.i3taXUjRGW ++ return 0 + wait_operator_pod percona-server-mongodb-operator-54d9d7959c-4ttmb + local pod=percona-server-mongodb-operator-54d9d7959c-4ttmb + set +o xtrace waiting for pod/percona-server-mongodb-operator-54d9d7959c-4ttmb to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.OnU5p1noOc +++ mktemp ++ local LAST_ERR=/tmp/tmp.tn5Y5qAJnl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OnU5p1noOc ++ cat /tmp/tmp.tn5Y5qAJnl ++ rm /tmp/tmp.OnU5p1noOc /tmp/tmp.tn5Y5qAJnl ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-54d9d7959c-4ttmb ++ mktemp + local LAST_OUT=/tmp/tmp.qK3lTWIQXZ ++ mktemp + local LAST_ERR=/tmp/tmp.c01V1k7JRW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-54d9d7959c-4ttmb + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qK3lTWIQXZ + cat /tmp/tmp.c01V1k7JRW + rm /tmp/tmp.qK3lTWIQXZ /tmp/tmp.c01V1k7JRW + return 0 2026-01-21T13:08:16.708Z INFO setup Manager starting up {"gitCommit": "44b3f99f3dd18eea1a45598be537e3a62c41314e", "gitBranch": "PR-2205-44b3f99f", "buildTime": "", "goVersion": "go1.25.6", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-fs-20300 + local namespace=demand-backup-fs-20300 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ 
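# deploy_operator, traced above: apply the CRDs server-side, rewrite the RBAC namespace with
# sed, patch the operator image and env vars with yq, then grep the pod log for the startup
# line. A condensed sketch, assuming the repo checkout as the working directory:
kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
sed -e 's^namespace: .*^namespace: psmdb-operator^' deploy/cw-rbac.yaml | kubectl apply -n psmdb-operator -f -
yq eval '
  (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f") |
  ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value = "true") |
  ((.. | select(.[] == "LOG_LEVEL")) |= .value = "DEBUG")' deploy/cw-operator.yaml \
  | kubectl apply -n psmdb-operator -f -
pod=$(kubectl get pods -n psmdb-operator -l name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}')
kubectl logs -n psmdb-operator "$pod" | grep 'Manager starting up'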
awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-fs-20300' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-fs-20300 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-fs-20300 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.oGQUjFr7t2 ++ mktemp + local LAST_ERR=/tmp/tmp.zBzWub7iGw + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.gGzI4ovW7r ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.aUBQXsQoEn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-fs-20300 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oGQUjFr7t2 + cat /tmp/tmp.zBzWub7iGw + rm /tmp/tmp.oGQUjFr7t2 /tmp/tmp.zBzWub7iGw + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gGzI4ovW7r + cat /tmp/tmp.aUBQXsQoEn + rm /tmp/tmp.gGzI4ovW7r /tmp/tmp.aUBQXsQoEn + return 0 + kubectl_bin wait --for=delete namespace demand-backup-fs-20300 ++ mktemp + local LAST_OUT=/tmp/tmp.gpsRQTsA3b ++ mktemp + local LAST_ERR=/tmp/tmp.lYBAZYN58d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-fs-20300 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gpsRQTsA3b + cat /tmp/tmp.lYBAZYN58d + rm /tmp/tmp.gpsRQTsA3b /tmp/tmp.lYBAZYN58d + return 0 + desc 'create namespace demand-backup-fs-20300' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-fs-20300 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-fs-20300 ++ 
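# destroy_chaos_mesh, traced twice above, deletes whatever chaos-mesh objects match by name.
# The repeated "error: resource(s) were provided, but no name was specified" lines come from
# piping an empty list into kubectl delete; a sketch using GNU xargs -r, which skips the call
# when the list is empty:
for kind in MutatingWebhookConfiguration ValidatingWebhookConfiguration crd clusterrolebinding clusterrole; do
  kubectl get "$kind" -o name 2>/dev/null | grep -i chaos-mesh | xargs -r timeout 30 kubectl delete || true
done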
mktemp + local LAST_OUT=/tmp/tmp.w1dzBSDal1 ++ mktemp + local LAST_ERR=/tmp/tmp.omH6h4cIbk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-fs-20300 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w1dzBSDal1 namespace/demand-backup-fs-20300 created + cat /tmp/tmp.omH6h4cIbk + rm /tmp/tmp.w1dzBSDal1 /tmp/tmp.omH6h4cIbk + return 0 + set_kube_ctx demand-backup-fs-20300 + local namespace=demand-backup-fs-20300 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.6qQdezoQin +++ mktemp ++ local LAST_ERR=/tmp/tmp.TxiRO9FP9Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6qQdezoQin ++ cat /tmp/tmp.TxiRO9FP9Y ++ rm /tmp/tmp.6qQdezoQin /tmp/tmp.TxiRO9FP9Y ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster12 --namespace=demand-backup-fs-20300 ++ mktemp + local LAST_OUT=/tmp/tmp.w54pWMAcWa ++ mktemp + local LAST_ERR=/tmp/tmp.1qMPalx3AG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster12 --namespace=demand-backup-fs-20300 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w54pWMAcWa Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster12" modified. + cat /tmp/tmp.1qMPalx3AG + rm /tmp/tmp.w54pWMAcWa /tmp/tmp.1qMPalx3AG + return 0 + kubectl_bin delete ns storage ++ mktemp + local LAST_OUT=/tmp/tmp.IOjMKokcox ++ mktemp + local LAST_ERR=/tmp/tmp.zJ05TRKIvO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.IOjMKokcox + cat /tmp/tmp.zJ05TRKIvO Error from server (NotFound): namespaces "storage" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.IOjMKokcox + cat /tmp/tmp.zJ05TRKIvO Error from server (NotFound): namespaces "storage" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete ns storage + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.IOjMKokcox + cat /tmp/tmp.zJ05TRKIvO Error from server (NotFound): namespaces "storage" not found + sleep 8 + cat /tmp/tmp.IOjMKokcox + cat /tmp/tmp.zJ05TRKIvO Error from server (NotFound): namespaces "storage" not found + rm /tmp/tmp.IOjMKokcox /tmp/tmp.zJ05TRKIvO + return 1 + : + [[ 1 != 1 ]] + uid=1001 + [[ -n '' ]] + log 'deploying NFS server' + set +o xtrace [2026-01-21T13:09:06+0000] deploying NFS server + deploy_nfs_server 1001 + local uid=1001 + kubectl_bin create namespace storage ++ mktemp + local LAST_OUT=/tmp/tmp.ZJiQffMyZ4 ++ mktemp + local LAST_ERR=/tmp/tmp.yty1gc8HWq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace storage + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZJiQffMyZ4 namespace/storage created + cat /tmp/tmp.yty1gc8HWq + rm /tmp/tmp.ZJiQffMyZ4 /tmp/tmp.yty1gc8HWq + return 0 + kubectl_bin apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/nfs-server.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Xv0cwbPkFV ++ mktemp + local LAST_ERR=/tmp/tmp.JnRJ2MPx3L + 
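# deploy_nfs_server setup, condensed from the trace above; --ignore-not-found avoids the three
# NotFound retries seen when deleting a storage namespace that does not exist yet:
kubectl delete ns storage --ignore-not-found
kubectl create namespace storage
kubectl apply -n storage -f e2e-tests/demand-backup-fs/conf/nfs-server.yml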
local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n storage -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/nfs-server.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xv0cwbPkFV serviceaccount/nfs-server created rolebinding.rbac.authorization.k8s.io/system:openshift:scc:privileged created persistentvolumeclaim/nfs-pvc created deployment.apps/nfs-server created service/nfs-service created + cat /tmp/tmp.JnRJ2MPx3L + rm /tmp/tmp.Xv0cwbPkFV /tmp/tmp.JnRJ2MPx3L + return 0 + sleep 5 ++ kubectl_bin get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7oFRswSbPt +++ mktemp ++ local LAST_ERR=/tmp/tmp.5BNTiAci3v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7oFRswSbPt ++ cat /tmp/tmp.5BNTiAci3v ++ rm /tmp/tmp.7oFRswSbPt /tmp/tmp.5BNTiAci3v ++ return 0 + local nfsPod=nfs-server-64ccf79cf6-mr4jb ++ kubectl_bin get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DI7xrlwub1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.t3uZcAJ0K3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DI7xrlwub1 ++ cat /tmp/tmp.t3uZcAJ0K3 ++ rm /tmp/tmp.DI7xrlwub1 /tmp/tmp.t3uZcAJ0K3 ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-64ccf79cf6-mr4jb to start Running' + set +o xtrace [2026-01-21T13:09:17+0000] Waiting for nfs-server-64ccf79cf6-mr4jb to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eYaYfYPO9M +++ mktemp ++ local LAST_ERR=/tmp/tmp.oFo0jDAdVr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eYaYfYPO9M ++ cat /tmp/tmp.oFo0jDAdVr ++ rm /tmp/tmp.eYaYfYPO9M /tmp/tmp.oFo0jDAdVr ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-64ccf79cf6-mr4jb to start Running' + set +o xtrace [2026-01-21T13:09:20+0000] Waiting for nfs-server-64ccf79cf6-mr4jb to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5jrHlCDfi3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eJZBma6DCs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5jrHlCDfi3 ++ cat /tmp/tmp.eJZBma6DCs ++ rm /tmp/tmp.5jrHlCDfi3 /tmp/tmp.eJZBma6DCs ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-64ccf79cf6-mr4jb to start Running' + set +o xtrace [2026-01-21T13:09:22+0000] Waiting for nfs-server-64ccf79cf6-mr4jb to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.3Yn8mwWxkA +++ mktemp ++ local LAST_ERR=/tmp/tmp.eaafExGwHn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3Yn8mwWxkA ++ cat /tmp/tmp.eaafExGwHn ++ rm /tmp/tmp.3Yn8mwWxkA /tmp/tmp.eaafExGwHn ++ return 0 + [[ Pending == \R\u\n\n\i\n\g ]] + log 'Waiting for nfs-server-64ccf79cf6-mr4jb to start Running' + set +o xtrace [2026-01-21T13:09:24+0000] Waiting for nfs-server-64ccf79cf6-mr4jb to start Running + sleep 1 ++ kubectl_bin get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.33KInsedDv +++ mktemp ++ local LAST_ERR=/tmp/tmp.JVkZS35gwq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod nfs-server-64ccf79cf6-mr4jb -n storage -o 'jsonpath={.status.phase}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.33KInsedDv ++ cat /tmp/tmp.JVkZS35gwq ++ rm /tmp/tmp.33KInsedDv /tmp/tmp.JVkZS35gwq ++ return 0 + [[ Running == \R\u\n\n\i\n\g ]] + kubectl_bin exec -n storage nfs-server-64ccf79cf6-mr4jb -- mkdir /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.OAD2XhUozL ++ mktemp + local LAST_ERR=/tmp/tmp.lQ4VkOVFuP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-64ccf79cf6-mr4jb -- mkdir /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OAD2XhUozL + cat /tmp/tmp.lQ4VkOVFuP + rm /tmp/tmp.OAD2XhUozL /tmp/tmp.lQ4VkOVFuP + return 0 + kubectl_bin exec -n storage nfs-server-64ccf79cf6-mr4jb -- chown 1001:1001 /exports/psmdb-some-name-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.0dCCu7erQb ++ mktemp + local LAST_ERR=/tmp/tmp.bXMpLiQPAR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -n storage nfs-server-64ccf79cf6-mr4jb -- chown 1001:1001 /exports/psmdb-some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0dCCu7erQb + cat /tmp/tmp.bXMpLiQPAR + rm /tmp/tmp.0dCCu7erQb /tmp/tmp.bXMpLiQPAR + return 0 + log 'creating secrets and start client' + set +o xtrace [2026-01-21T13:09:28+0000] creating secrets and start client + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.sWBjDzV4Jw ++ mktemp + local LAST_ERR=/tmp/tmp.w5JfMWY5qG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sWBjDzV4Jw secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.w5JfMWY5qG + rm /tmp/tmp.sWBjDzV4Jw /tmp/tmp.w5JfMWY5qG + return 0 + [[ -n '' ]] + log 'creating PSMDB cluster some-name' + set +o xtrace [2026-01-21T13:09:30+0000] creating PSMDB cluster some-name + [[ 1 != 1 ]] + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/some-name.yml + '[' -z '' ']' + cat_config 
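# The Pending/Running polling loop above can be collapsed into a single kubectl wait; after
# that the export directory is prepared inside the server pod (uid 1001 as in the trace). A
# sketch assuming the same app=nfs-server label and export path:
kubectl wait -n storage --for=condition=Ready pod -l app=nfs-server --timeout=300s
nfs_pod=$(kubectl get pod -n storage -l app=nfs-server -o 'jsonpath={.items[].metadata.name}')
kubectl exec -n storage "$nfs_pod" -- sh -c 'mkdir -p /exports/psmdb-some-name-rs0 && chown 1001:1001 /exports/psmdb-some-name-rs0'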
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/some-name.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f"' + local LAST_OUT=/tmp/tmp.V2VfH3d9y2 + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/demand-backup-fs-20300/g ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.efsGeq7PcQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.V2VfH3d9y2 perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.efsGeq7PcQ + rm /tmp/tmp.V2VfH3d9y2 /tmp/tmp.efsGeq7PcQ + return 0 + log 'wait for all 3 pods to start' + set +o xtrace [2026-01-21T13:09:33+0000] wait for all 3 pods to start + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.584PWvcBHb +++ mktemp ++ local LAST_ERR=/tmp/tmp.mxIrVvIYUf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.584PWvcBHb ++ cat /tmp/tmp.mxIrVvIYUf ++ rm /tmp/tmp.584PWvcBHb /tmp/tmp.mxIrVvIYUf ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CdpCsnUbns +++ mktemp ++ local LAST_ERR=/tmp/tmp.uQhzOqdqo8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CdpCsnUbns ++ cat /tmp/tmp.uQhzOqdqo8 ++ rm /tmp/tmp.CdpCsnUbns /tmp/tmp.uQhzOqdqo8 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wexSx6zGpr +++ mktemp ++ local LAST_ERR=/tmp/tmp.1SxPaN0KRV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
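# cat_config, traced above, stitches the test images into the cluster CR before applying it.
# A condensed sketch showing only the mongod image override and the namespace substitution
# (the full pipeline also patches initImage, backup and pmm images, and upgradeOptions):
yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
  e2e-tests/demand-backup-fs/conf/some-name.yml \
  | sed -e 's/NAME_SPACE/demand-backup-fs-20300/g' \
  | kubectl apply -f -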
'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wexSx6zGpr ++ cat /tmp/tmp.1SxPaN0KRV ++ rm /tmp/tmp.wexSx6zGpr /tmp/tmp.1SxPaN0KRV ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................... + [[ 1 == 1 ]] + log 'checking if statefulset created with expected config' + set +o xtrace [2026-01-21T13:11:36+0000] checking if statefulset created with expected config + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.3Kihw2RMl4/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("demand-backup-fs-20300", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.JyDdTImyOP ++ mktemp + local LAST_ERR=/tmp/tmp.Y9wH7DwZrB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JyDdTImyOP + cat /tmp/tmp.Y9wH7DwZrB + rm /tmp/tmp.JyDdTImyOP /tmp/tmp.Y9wH7DwZrB + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.3Kihw2RMl4/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.3Kihw2RMl4/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.3Kihw2RMl4/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml /tmp/tmp.3Kihw2RMl4/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2026-01-21T13:11:37+0000] compare_kubectl: statefulset/some-name-rs0 OK + log 'creating user' + set +o xtrace [2026-01-21T13:11:37+0000] creating user + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-20300 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EviOSYangq +++ mktemp ++ local LAST_ERR=/tmp/tmp.tvFH2hB4Sd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EviOSYangq ++ cat /tmp/tmp.tvFH2hB4Sd ++ rm /tmp/tmp.EviOSYangq /tmp/tmp.tvFH2hB4Sd ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.qNQe9s6et2 ++ mktemp + local LAST_ERR=/tmp/tmp.mzDMx3QtEm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qNQe9s6et2 Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
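# compare_kubectl, traced above, amounts to: dump the live object, strip volatile server-set
# fields, diff against the golden file. A heavily shortened sketch of the normalization (the
# full yq filter above removes many more fields, e.g. images, clusterIPs, and hashes):
kubectl get -o yaml statefulset/some-name-rs0 \
  | yq eval 'del(.metadata.managedFields) | del(.metadata.resourceVersion) | del(.status)' - \
  > /tmp/statefulset_some-name-rs0.yml
diff -u e2e-tests/demand-backup-fs/compare/statefulset_some-name-rs0.yml /tmp/statefulset_some-name-rs0.yml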
mongodb://some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f9cb8baa-d093-40f5-9027-09a29da89c57") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.mzDMx3QtEm + rm /tmp/tmp.qNQe9s6et2 /tmp/tmp.mzDMx3QtEm + return 0 + sleep 2 + log 'write initial data' + set +o xtrace [2026-01-21T13:11:42+0000] write initial data + write_data 100500 '' + local x=100500 + local find_prefix= + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6kv1BIrubi +++ mktemp ++ local LAST_ERR=/tmp/tmp.AuH87gPDDP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6kv1BIrubi ++ cat /tmp/tmp.AuH87gPDDP ++ rm /tmp/tmp.6kv1BIrubi /tmp/tmp.AuH87gPDDP ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.WUHYL4VzWL ++ mktemp + local LAST_ERR=/tmp/tmp.aYcJpoUV7l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WUHYL4VzWL Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3e66f72a-3b75-483d-9193-df2a8498a74c") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.aYcJpoUV7l + rm /tmp/tmp.WUHYL4VzWL /tmp/tmp.aYcJpoUV7l + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix= + local suffix=.svc.cluster.local + local 
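# run_mongo, traced above, pipes a mongo shell script into the psmdb-client pod. A minimal
# equivalent of the insert step, with credentials and URI taken from the trace:
client=$(kubectl get pods -l name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client" -- bash -c \
  'printf "use myApp\n db.test.insert({ x: 100500 })\n" | mongo "mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'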
database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:11:45+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XBJQ3ahcEZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7wFztKiTOA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XBJQ3ahcEZ ++ cat /tmp/tmp.7wFztKiTOA ++ rm /tmp/tmp.XBJQ3ahcEZ /tmp/tmp.7wFztKiTOA ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BwxTXMeuMz ++ mktemp + local LAST_ERR=/tmp/tmp.YNQYB0RjpN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BwxTXMeuMz + cat /tmp/tmp.YNQYB0RjpN + rm /tmp/tmp.BwxTXMeuMz /tmp/tmp.YNQYB0RjpN + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.3Kihw2RMl4/find + wait_backup_agent some-name-rs0-0 + local agent_pod=some-name-rs0-0 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-0...2026-01-21T13:10:59.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-1 + local agent_pod=some-name-rs0-1 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-1...2026-01-21T13:11:34.000+0000 I listening for the commands + wait_backup_agent some-name-rs0-2 + local agent_pod=some-name-rs0-2 + set +o xtrace waiting for pbm-agent to be ready in some-name-rs0-2...2026-01-21T13:11:37.000+0000 I listening for the commands + desc 'CASE 1: Logical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: Logical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical + run_backup nfs backup-nfs-logical logical + local storage=nfs + local 
backup_name=backup-nfs-logical + local type=logical + log 'running backup backup-nfs-logical' + set +o xtrace [2026-01-21T13:11:51+0000] running backup backup-nfs-logical + yq eval '.metadata.name = "backup-nfs-logical" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.r46wb7NxMA ++ mktemp + local LAST_ERR=/tmp/tmp.RrHzfo47WO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r46wb7NxMA perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical created + cat /tmp/tmp.RrHzfo47WO + rm /tmp/tmp.r46wb7NxMA /tmp/tmp.RrHzfo47WO + return 0 + wait_backup backup-nfs-logical + local backup_name=backup-nfs-logical + local target_state=ready + set +o xtrace waiting for backup-nfs-logical to reach ready state.......OK + run_recovery_check backup-nfs-logical some-name -2nd '' + local backup=backup-nfs-logical + local cluster=some-name + local find_prefix_before=-2nd + local find_prefix_after= + write_data 100501 -2nd + local x=100501 + local find_prefix=-2nd + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o0wJNQz8CJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.yYW7jgHAml ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o0wJNQz8CJ ++ cat /tmp/tmp.yYW7jgHAml ++ rm /tmp/tmp.o0wJNQz8CJ /tmp/tmp.yYW7jgHAml ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fqG43q2QmN ++ mktemp + local LAST_ERR=/tmp/tmp.KIE4vXvkoQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fqG43q2QmN Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0662db45-61cd-45c1-b645-bbadac62e4c7") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions 
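# run_backup, traced above, renders the backup CR with yq and applies it; wait_backup then
# polls until the CR reports "ready". A condensed sketch, assuming psmdb-backup is the CRD's
# short name and that kubectl wait's jsonpath matching is available (kubectl >= 1.23):
yq eval '.metadata.name = "backup-nfs-logical" | .spec.storageName = "nfs" | .spec.type = "logical"' \
  e2e-tests/demand-backup-fs/conf/backup-nfs.yml | kubectl apply -f -
kubectl wait --for=jsonpath='{.status.state}'=ready psmdb-backup/backup-nfs-logical --timeout=600s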
do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.KIE4vXvkoQ + rm /tmp/tmp.fqG43q2QmN /tmp/tmp.KIE4vXvkoQ + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 -2nd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix=-2nd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:12:07+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t2RUpnCl0a +++ mktemp ++ local LAST_ERR=/tmp/tmp.lIWadqOdZ9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t2RUpnCl0a ++ cat /tmp/tmp.lIWadqOdZ9 ++ rm /tmp/tmp.t2RUpnCl0a /tmp/tmp.lIWadqOdZ9 ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kBT7zymsP2 ++ mktemp + local LAST_ERR=/tmp/tmp.gyq1yx47Mq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kBT7zymsP2 + cat /tmp/tmp.gyq1yx47Mq + rm /tmp/tmp.kBT7zymsP2 /tmp/tmp.gyq1yx47Mq + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find-2nd.json /tmp/tmp.3Kihw2RMl4/find-2nd + run_restore backup-nfs-logical + local backup_name=backup-nfs-logical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/restore.yml + log 'running restore restore-backup-nfs-logical' + set +o xtrace [2026-01-21T13:12:10+0000] running restore restore-backup-nfs-logical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-logical/' + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-logical/' + 
kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.QRqLN2QSzZ ++ mktemp + local LAST_ERR=/tmp/tmp.owu5iQ8jjJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QRqLN2QSzZ perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical created + cat /tmp/tmp.owu5iQ8jjJ + rm /tmp/tmp.QRqLN2QSzZ /tmp/tmp.owu5iQ8jjJ + return 0 + wait_restore backup-nfs-logical some-name + local backup_name=backup-nfs-logical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical object to be created.OK Waiting psmdb-restore/restore-backup-nfs-logical to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2JDdWjHTy9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZUWQIDoSYg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2JDdWjHTy9 ++ cat /tmp/tmp.ZUWQIDoSYg ++ rm /tmp/tmp.2JDdWjHTy9 /tmp/tmp.ZUWQIDoSYg ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 '' .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix= + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:12:32+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VlWzpV3HGk +++ mktemp ++ local LAST_ERR=/tmp/tmp.znAMO6N2pJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VlWzpV3HGk ++ cat /tmp/tmp.znAMO6N2pJ ++ rm /tmp/tmp.VlWzpV3HGk /tmp/tmp.znAMO6N2pJ ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf 
'\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ZD0mvNEUfs ++ mktemp + local LAST_ERR=/tmp/tmp.YjEg9r7AvA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZD0mvNEUfs + cat /tmp/tmp.YjEg9r7AvA + rm /tmp/tmp.ZD0mvNEUfs /tmp/tmp.YjEg9r7AvA + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find.json /tmp/tmp.3Kihw2RMl4/find + desc 'CASE 2: Logical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: Logical backup and PiTR ----------------------------------------------------------------------------------- + backup_name=backup-nfs-logical-pitr + run_backup nfs backup-nfs-logical-pitr logical + local storage=nfs + local backup_name=backup-nfs-logical-pitr + local type=logical + log 'running backup backup-nfs-logical-pitr' + set +o xtrace [2026-01-21T13:12:35+0000] running backup backup-nfs-logical-pitr + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-nfs-logical-pitr" | .spec.storageName = "nfs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/backup-nfs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.n3C0IKDkkN ++ mktemp + local LAST_ERR=/tmp/tmp.uRN1vdEGmb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n3C0IKDkkN perconaservermongodbbackup.psmdb.percona.com/backup-nfs-logical-pitr created + cat /tmp/tmp.uRN1vdEGmb + rm /tmp/tmp.n3C0IKDkkN /tmp/tmp.uRN1vdEGmb + return 0 + wait_backup backup-nfs-logical-pitr + local backup_name=backup-nfs-logical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-logical-pitr to reach ready state.......OK + write_data 100502 -3rd + local x=100502 + local find_prefix=-3rd + run_mongo 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6LMkHgKDJb +++ mktemp ++ local LAST_ERR=/tmp/tmp.Eja9RkmHYM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6LMkHgKDJb ++ cat /tmp/tmp.Eja9RkmHYM ++ rm /tmp/tmp.6LMkHgKDJb /tmp/tmp.Eja9RkmHYM ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.kxR10bORKm ++ mktemp + local LAST_ERR=/tmp/tmp.bsHGRjDqoD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kxR10bORKm Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("80118fc3-3808-4db0-b126-4f088ff47305") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.bsHGRjDqoD + rm /tmp/tmp.kxR10bORKm /tmp/tmp.bsHGRjDqoD + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:12:52+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rjjq96wTLo +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fyxi9dq8tq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rjjq96wTLo ++ cat /tmp/tmp.Fyxi9dq8tq ++ rm /tmp/tmp.rjjq96wTLo /tmp/tmp.Fyxi9dq8tq ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.WgZS0QQ1yH ++ mktemp + local LAST_ERR=/tmp/tmp.7VpUDX1MC0 + local 
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WgZS0QQ1yH + cat /tmp/tmp.7VpUDX1MC0 + rm /tmp/tmp.WgZS0QQ1yH /tmp/tmp.7VpUDX1MC0 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.3Kihw2RMl4/find-3rd + run_pitr_check backup-nfs-logical-pitr some-name -3rd + local backup=backup-nfs-logical-pitr + local cluster=some-name + local find_prefix=-3rd + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oWorlJULNP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jqz2pg5LkA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oWorlJULNP ++ cat /tmp/tmp.Jqz2pg5LkA ++ rm /tmp/tmp.oWorlJULNP /tmp/tmp.Jqz2pg5LkA ++ return 0 + local backup_last_write=1769001162 + local retries=0 ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eoJwq6iATQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CbLq87MdA5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eoJwq6iATQ +++ cat /tmp/tmp.CbLq87MdA5 +++ rm /tmp/tmp.eoJwq6iATQ /tmp/tmp.CbLq87MdA5 +++ return 0 ++ echo null + local last_chunk=null + [[ null -gt 1769001162 ]] + [[ 0 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ayF7DaGMfB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.V3N6A7F6Nt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ayF7DaGMfB +++ cat /tmp/tmp.V3N6A7F6Nt +++ rm /tmp/tmp.ayF7DaGMfB /tmp/tmp.V3N6A7F6Nt +++ return 0 ++ echo null + last_chunk=null + retries=1 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:12:42 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:13:00+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42) + sleep 10 + [[ null -gt 1769001162 ]] + [[ 1 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq 
'.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xcXJ5Wfb6R ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kw7NBgQN56 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xcXJ5Wfb6R +++ cat /tmp/tmp.kw7NBgQN56 +++ rm /tmp/tmp.xcXJ5Wfb6R /tmp/tmp.kw7NBgQN56 +++ return 0 ++ echo null + last_chunk=null + retries=2 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:12:42 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:13:12+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42) + sleep 10 + [[ null -gt 1769001162 ]] + [[ 2 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qlCvmPBc8R ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TSIX4PoWjz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qlCvmPBc8R +++ cat /tmp/tmp.TSIX4PoWjz +++ rm /tmp/tmp.qlCvmPBc8R /tmp/tmp.TSIX4PoWjz +++ return 0 ++ echo null + last_chunk=null + retries=3 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:12:42 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:13:24+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42) + sleep 10 + [[ null -gt 1769001162 ]] + [[ 3 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GdUf2SQuor ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SQPojst13c +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GdUf2SQuor +++ cat /tmp/tmp.SQPojst13c +++ rm /tmp/tmp.GdUf2SQuor /tmp/tmp.SQPojst13c +++ return 0 ++ echo null + last_chunk=null + retries=4 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:12:42 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:13:36+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-21 
13:12:42) + sleep 10 + [[ null -gt 1769001162 ]] + [[ 4 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3RGVoXe43g ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TzzGq7GSmC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3RGVoXe43g +++ cat /tmp/tmp.TzzGq7GSmC +++ rm /tmp/tmp.3RGVoXe43g /tmp/tmp.TzzGq7GSmC +++ return 0 ++ echo null + last_chunk=null + retries=5 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:12:42 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:13:47+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42) + sleep 10 + [[ null -gt 1769001162 ]] + [[ 5 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PlqOR0YTdZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wnn5K23xMh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PlqOR0YTdZ +++ cat /tmp/tmp.wnn5K23xMh +++ rm /tmp/tmp.PlqOR0YTdZ /tmp/tmp.wnn5K23xMh +++ return 0 ++ echo null + last_chunk=null + retries=6 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:12:42 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:13:59+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42) + sleep 10 + [[ null -gt 1769001162 ]] + [[ 6 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.E25iT43jJ3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Rl9Bmx3Nyf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.E25iT43jJ3 +++ cat /tmp/tmp.Rl9Bmx3Nyf +++ rm /tmp/tmp.E25iT43jJ3 /tmp/tmp.Rl9Bmx3Nyf +++ return 0 ++ echo null + last_chunk=null + retries=7 ++ format_date null ++ local timestamp=null +++ TZ=UTC +++ /usr/sbin/date -d@null '+%Y-%m-%d %H:%M:%S' date: invalid date ‘@null’ ++ echo ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 
13:12:42 + log 'Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:14:11+0000] Waiting for last oplog chunk () to be greater than last write (2026-01-21 13:12:42) + sleep 10 + [[ null -gt 1769001162 ]] + [[ 7 -gt 30 ]] ++ get_latest_oplog_chunk_ts some-name ++ local cluster=some-name +++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0iT5b8Z4KZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AzcZXlknnK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0iT5b8Z4KZ +++ cat /tmp/tmp.AzcZXlknnK +++ rm /tmp/tmp.0iT5b8Z4KZ /tmp/tmp.AzcZXlknnK +++ return 0 ++ echo 1769001256 + last_chunk=1769001256 + retries=8 ++ format_date 1769001256 ++ local timestamp=1769001256 +++ TZ=UTC +++ /usr/sbin/date -d@1769001256 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:14:16 ++ format_date 1769001162 ++ local timestamp=1769001162 +++ TZ=UTC +++ /usr/sbin/date -d@1769001162 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:12:42 + log 'Waiting for last oplog chunk (2026-01-21 13:14:16) to be greater than last write (2026-01-21 13:12:42)' + set +o xtrace [2026-01-21T13:14:23+0000] Waiting for last oplog chunk (2026-01-21 13:14:16) to be greater than last write (2026-01-21 13:12:42) + sleep 10 + [[ 1769001256 -gt 1769001162 ]] +++ get_latest_oplog_chunk_ts some-name +++ local cluster=some-name ++++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ jq '.backups.pitrChunks.pitrChunks | last | .range.end' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.LoUvAfxiq3 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.RRYvs1u3h1 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.LoUvAfxiq3 ++++ cat /tmp/tmp.RRYvs1u3h1 ++++ rm /tmp/tmp.LoUvAfxiq3 /tmp/tmp.RRYvs1u3h1 ++++ return 0 +++ echo 1769001256 ++ format_date 1769001256 ++ local timestamp=1769001256 +++ TZ=UTC +++ /usr/sbin/date -d@1769001256 '+%Y-%m-%d %H:%M:%S' ++ echo 2026-01-21 13:14:16 + local 'target_time=2026-01-21 13:14:16' + log 'dropping test collection' + set +o xtrace [2026-01-21T13:14:35+0000] dropping test collection + run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HoUb58TykE +++ mktemp ++ local LAST_ERR=/tmp/tmp.IVeFsZ6x08 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HoUb58TykE ++ cat /tmp/tmp.IVeFsZ6x08 ++ rm /tmp/tmp.HoUb58TykE /tmp/tmp.IVeFsZ6x08 ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin 
exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fBmxwS9qmi ++ mktemp + local LAST_ERR=/tmp/tmp.T0LE9HAe0n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fBmxwS9qmi Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a72556d6-8243-466f-8652-e0e547b60937") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.T0LE9HAe0n + rm /tmp/tmp.fBmxwS9qmi /tmp/tmp.T0LE9HAe0n + return 0 + log 'checking pitr... backup: backup-nfs-logical-pitr target: 2026-01-21 13:14:16' + set +o xtrace [2026-01-21T13:14:38+0000] checking pitr... backup: backup-nfs-logical-pitr target: 2026-01-21 13:14:16 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/pitr.yml + yq eval '.metadata.name = "restore-backup-nfs-logical-pitr"' + yq eval '.spec.backupName = "backup-nfs-logical-pitr"' + kubectl_bin apply -f - + yq eval '.spec.pitr.date = "2026-01-21 13:14:16"' ++ mktemp + local LAST_OUT=/tmp/tmp.4Kx9GeFfYE ++ mktemp + local LAST_ERR=/tmp/tmp.FgL8ne27A1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4Kx9GeFfYE perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-logical-pitr created + cat /tmp/tmp.FgL8ne27A1 + rm /tmp/tmp.4Kx9GeFfYE /tmp/tmp.FgL8ne27A1 + return 0 + wait_restore backup-nfs-logical-pitr some-name + local backup_name=backup-nfs-logical-pitr + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-logical-pitr object to be created.OK Waiting psmdb-restore/restore-backup-nfs-logical-pitr to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AVq9QyJsLY +++ mktemp ++ local LAST_ERR=/tmp/tmp.ruaxH0C9ou ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AVq9QyJsLY ++ cat /tmp/tmp.ruaxH0C9ou ++ rm /tmp/tmp.AVq9QyJsLY /tmp/tmp.ruaxH0C9ou ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find 
myApp:myPass@some-name-rs0.demand-backup-fs-20300 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:15:10+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p4sVhy6twf +++ mktemp ++ local LAST_ERR=/tmp/tmp.dDFEjrjM5u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p4sVhy6twf ++ cat /tmp/tmp.dDFEjrjM5u ++ rm /tmp/tmp.p4sVhy6twf /tmp/tmp.dDFEjrjM5u ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.mgbsSbVbw4 ++ mktemp + local LAST_ERR=/tmp/tmp.8FUb1bqxc0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mgbsSbVbw4 + cat /tmp/tmp.8FUb1bqxc0 + rm /tmp/tmp.mgbsSbVbw4 /tmp/tmp.8FUb1bqxc0 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.3Kihw2RMl4/find-3rd + desc 'CASE 3: Physical backup and restore' + set +o xtrace ----------------------------------------------------------------------------------- CASE 3: Physical backup and restore ----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical + run_backup nfs backup-nfs-physical physical + local storage=nfs + local backup_name=backup-nfs-physical + local type=physical + log 'running backup backup-nfs-physical' + set +o xtrace [2026-01-21T13:15:13+0000] running backup backup-nfs-physical + yq eval '.metadata.name = "backup-nfs-physical" | .spec.storageName = "nfs" | .spec.type = "physical"' 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/backup-nfs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.q5504g5OSY ++ mktemp + local LAST_ERR=/tmp/tmp.8k0bbgdzMs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q5504g5OSY perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical created + cat /tmp/tmp.8k0bbgdzMs + rm /tmp/tmp.q5504g5OSY /tmp/tmp.8k0bbgdzMs + return 0 + wait_backup backup-nfs-physical + local backup_name=backup-nfs-physical + local target_state=ready + set +o xtrace waiting for backup-nfs-physical to reach ready state.......OK + run_recovery_check backup-nfs-physical some-name -4th -3rd + local backup=backup-nfs-physical + local cluster=some-name + local find_prefix_before=-4th + local find_prefix_after=-3rd + write_data 100501 -4th + local x=100501 + local find_prefix=-4th + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LutmaKiZgQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ufsZvK8by3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LutmaKiZgQ ++ cat /tmp/tmp.ufsZvK8by3 ++ rm /tmp/tmp.LutmaKiZgQ /tmp/tmp.ufsZvK8by3 ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hk8r4P5nWp ++ mktemp + local LAST_ERR=/tmp/tmp.JzoEfa9BKM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hk8r4P5nWp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9508815c-bd60-4063-b60a-57c8e2e80ad6") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.JzoEfa9BKM + rm /tmp/tmp.hk8r4P5nWp /tmp/tmp.JzoEfa9BKM + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 -4th .svc.cluster.local myApp test 
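The compare_mongo_cmd trace that follows repeats one pattern: run the query through the client pod, scrub nondeterministic output, and diff the result against a committed golden file. A minimal sketch of that pattern, reconstructed from the trace and not the suite's exact helper; run_mongo is assumed to be the kubectl-exec wrapper traced above, and test_dir/tmp_dir are hypothetical stand-ins for the Jenkins workspace and mktemp directory paths:

compare_mongo_cmd() {
    # Sketch only: run the query, normalize volatile fields, diff against the golden file.
    local command=$1 uri=$2 postfix=$3 suffix=$4 database=$5 collection=$6
    run_mongo "use ${database}\n db.${collection}.${command}()" "$uri" mongodb "$suffix" \
        | grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Implicit session:|versions do not match|connecting to:' \
        | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        >"${tmp_dir}/${command}${postfix}"
    diff -u "${test_dir}/compare/${command}${postfix}.json" "${tmp_dir}/${command}${postfix}"
}

diff -u exits non-zero on any mismatch, so a restore that loses or resurrects rows fails the case at exactly these diff lines in the trace.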
+ local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix=-4th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:15:30+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZaKF42VCQF +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZE9LjG6qt8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZaKF42VCQF ++ cat /tmp/tmp.ZE9LjG6qt8 ++ rm /tmp/tmp.ZaKF42VCQF /tmp/tmp.ZE9LjG6qt8 ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.14wwlVG3jU ++ mktemp + local LAST_ERR=/tmp/tmp.PKKGAmJpLl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.14wwlVG3jU + cat /tmp/tmp.PKKGAmJpLl + rm /tmp/tmp.14wwlVG3jU /tmp/tmp.PKKGAmJpLl + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find-4th.json /tmp/tmp.3Kihw2RMl4/find-4th + run_restore backup-nfs-physical + local backup_name=backup-nfs-physical + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/restore.yml + log 'running restore restore-backup-nfs-physical' + set +o xtrace [2026-01-21T13:15:32+0000] running restore restore-backup-nfs-physical + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-nfs-physical/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-nfs-physical/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.HuZGKszBcU ++ mktemp + local LAST_ERR=/tmp/tmp.EpsLrVVdlP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a 
-n 1 ']' + break + cat /tmp/tmp.HuZGKszBcU perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical created + cat /tmp/tmp.EpsLrVVdlP + rm /tmp/tmp.HuZGKszBcU /tmp/tmp.EpsLrVVdlP + return 0 + wait_restore backup-nfs-physical some-name + local backup_name=backup-nfs-physical + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-nfs-physical object to be created.OK Waiting psmdb-restore/restore-backup-nfs-physical to reach state "ready" .....OK after 4 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9VVScBKIlF +++ mktemp ++ local LAST_ERR=/tmp/tmp.xP2tL7bpx0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9VVScBKIlF ++ cat /tmp/tmp.xP2tL7bpx0 ++ rm /tmp/tmp.9VVScBKIlF /tmp/tmp.xP2tL7bpx0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3i2x0JRRx1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.j1d6wPeaED ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3i2x0JRRx1 ++ cat /tmp/tmp.j1d6wPeaED ++ rm /tmp/tmp.3i2x0JRRx1 /tmp/tmp.j1d6wPeaED ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wJkcHythIx +++ mktemp ++ local LAST_ERR=/tmp/tmp.jRGAlvNvQL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wJkcHythIx ++ cat /tmp/tmp.jRGAlvNvQL ++ rm /tmp/tmp.wJkcHythIx /tmp/tmp.jRGAlvNvQL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wOAf2f005b +++ mktemp ++ local LAST_ERR=/tmp/tmp.yOsy1L1Nhg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wOAf2f005b ++ cat /tmp/tmp.yOsy1L1Nhg ++ rm /tmp/tmp.wOAf2f005b /tmp/tmp.yOsy1L1Nhg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.77XppsNoOg +++ mktemp ++ local LAST_ERR=/tmp/tmp.4roM3RmUD2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.77XppsNoOg ++ cat /tmp/tmp.4roM3RmUD2 ++ rm /tmp/tmp.77XppsNoOg /tmp/tmp.4roM3RmUD2 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GjNL4MY1Km +++ mktemp ++ local LAST_ERR=/tmp/tmp.VsDCU474cz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GjNL4MY1Km ++ cat /tmp/tmp.VsDCU474cz ++ rm /tmp/tmp.GjNL4MY1Km /tmp/tmp.VsDCU474cz ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aaIcdFJmk3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.80lDAS0lBm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aaIcdFJmk3 ++ cat /tmp/tmp.80lDAS0lBm ++ rm /tmp/tmp.aaIcdFJmk3 /tmp/tmp.80lDAS0lBm ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c39uFW6dvK +++ mktemp ++ local LAST_ERR=/tmp/tmp.XdGYikqadD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c39uFW6dvK ++ cat /tmp/tmp.XdGYikqadD ++ rm /tmp/tmp.c39uFW6dvK /tmp/tmp.XdGYikqadD ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wf5E7FVCt7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sJITzBchBK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wf5E7FVCt7 ++ cat /tmp/tmp.sJITzBchBK ++ rm /tmp/tmp.Wf5E7FVCt7 /tmp/tmp.sJITzBchBK ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 -3rd .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix=-3rd + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:21:34+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M2MZhxFoxQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.f7hR954VxQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M2MZhxFoxQ ++ cat /tmp/tmp.f7hR954VxQ ++ rm /tmp/tmp.M2MZhxFoxQ /tmp/tmp.f7hR954VxQ ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.axmZljznfd ++ mktemp + local LAST_ERR=/tmp/tmp.dQQ4oyCikz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.axmZljznfd + cat /tmp/tmp.dQQ4oyCikz + rm /tmp/tmp.axmZljznfd /tmp/tmp.dQQ4oyCikz + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find-3rd.json /tmp/tmp.3Kihw2RMl4/find-3rd + desc 'CASE 4: Physical backup and PiTR' + set +o xtrace ----------------------------------------------------------------------------------- CASE 4: Physical backup and PiTR 
----------------------------------------------------------------------------------- + backup_name=backup-nfs-physical-pitr + run_backup nfs backup-nfs-physical-pitr physical + local storage=nfs + local backup_name=backup-nfs-physical-pitr + local type=physical + log 'running backup backup-nfs-physical-pitr' + set +o xtrace [2026-01-21T13:21:36+0000] running backup backup-nfs-physical-pitr + kubectl_bin apply -f - + yq eval '.metadata.name = "backup-nfs-physical-pitr" | .spec.storageName = "nfs" | .spec.type = "physical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/backup-nfs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.6lh51IPV3y ++ mktemp + local LAST_ERR=/tmp/tmp.BITQGBLBlV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6lh51IPV3y perconaservermongodbbackup.psmdb.percona.com/backup-nfs-physical-pitr created + cat /tmp/tmp.BITQGBLBlV + rm /tmp/tmp.6lh51IPV3y /tmp/tmp.BITQGBLBlV + return 0 + wait_backup backup-nfs-physical-pitr + local backup_name=backup-nfs-physical-pitr + local target_state=ready + set +o xtrace waiting for backup-nfs-physical-pitr to reach ready state.......OK + write_data 100503 -5th + local x=100503 + local find_prefix=-5th + run_mongo 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GE53Zr1gdP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Sr9wKwPYS5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GE53Zr1gdP ++ cat /tmp/tmp.Sr9wKwPYS5 ++ rm /tmp/tmp.GE53Zr1gdP /tmp/tmp.Sr9wKwPYS5 ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zGaswICWaX ++ mktemp + local LAST_ERR=/tmp/tmp.AL1NCX5yNF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zGaswICWaX Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b9145b93-a273-4f51-b393-51bba350d07d") } Percona Server 
for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.AL1NCX5yNF + rm /tmp/tmp.zGaswICWaX /tmp/tmp.AL1NCX5yNF + return 0 + compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 -5th .svc.cluster.local myApp test + local command=find + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local postfix=-5th + local suffix=.svc.cluster.local + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-01-21T13:21:53+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0.demand-backup-fs-20300 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0.demand-backup-fs-20300 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uAkKczABIV +++ mktemp ++ local LAST_ERR=/tmp/tmp.bpynyP9zgS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uAkKczABIV ++ cat /tmp/tmp.bpynyP9zgS ++ rm /tmp/tmp.uAkKczABIV /tmp/tmp.bpynyP9zgS ++ return 0 + local client_container=psmdb-client-696897d69b-s95hn + kubectl_bin exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.nG4IEWg9kA ++ mktemp + local LAST_ERR=/tmp/tmp.F8mQb3PAWb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nG4IEWg9kA + cat /tmp/tmp.F8mQb3PAWb + rm /tmp/tmp.nG4IEWg9kA /tmp/tmp.F8mQb3PAWb + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.3Kihw2RMl4/find-5th + run_pitr_check backup-nfs-physical-pitr some-name -5th + local backup=backup-nfs-physical-pitr + local cluster=some-name + local find_prefix=-5th + wait_for_oplogs some-name + local cluster1=some-name ++ kubectl_bin exec some-name-rs0-0 -c backup-agent -- pbm status -o json ++ jq '.backups.snapshot[0].restoreTo' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T9sNYAUnfa +++ mktemp ++ local LAST_ERR=/tmp/tmp.IgQWx8Nhi5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ 
+ set +e
++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
++ return 0
+ local backup_last_write=1769001701
+ local retries=0
++ get_latest_oplog_chunk_ts some-name
+++ kubectl exec some-name-rs0-0 -c backup-agent -- pbm status -o json
+++ jq '.backups.pitrChunks.pitrChunks | last | .range.end'
++ echo 1769001286
+ local last_chunk=1769001286
+ [[ 1769001286 -gt 1769001701 ]]
+ [[ 0 -gt 30 ]]
[2026-01-21T13:22:00+0000] Waiting for last oplog chunk (2026-01-21 13:14:46) to be greater than last write (2026-01-21 13:21:41)
+ sleep 10
[2026-01-21T13:22:12+0000] Waiting for last oplog chunk (2026-01-21 13:14:46) to be greater than last write (2026-01-21 13:21:41)
+ sleep 10
[2026-01-21T13:22:24+0000] Waiting for last oplog chunk (2026-01-21 13:14:46) to be greater than last write (2026-01-21 13:21:41)
+ sleep 10
[2026-01-21T13:22:35+0000] Waiting for last oplog chunk (2026-01-21 13:14:46) to be greater than last write (2026-01-21 13:21:41)
+ sleep 10
[2026-01-21T13:22:47+0000] Waiting for last oplog chunk (2026-01-21 13:14:46) to be greater than last write (2026-01-21 13:21:41)
+ sleep 10
[2026-01-21T13:22:59+0000] Waiting for last oplog chunk (2026-01-21 13:14:46) to be greater than last write (2026-01-21 13:21:41)
+ sleep 10
++ get_latest_oplog_chunk_ts some-name
++ echo 1769001780
+ last_chunk=1769001780
+ retries=7
[2026-01-21T13:23:11+0000] Waiting for last oplog chunk (2026-01-21 13:23:00) to be greater than last write (2026-01-21 13:21:41)
+ sleep 10
+ [[ 1769001780 -gt 1769001701 ]]
++ format_date 1769001780
++ echo 2026-01-21 13:23:00
+ local 'target_time=2026-01-21 13:23:00'
[2026-01-21T13:23:23+0000] dropping test collection
+ run_mongo 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-fs-20300
+ local client_container=psmdb-client-696897d69b-s95hn
+ kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-fs-20300.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("2d536a7e-085b-49d9-85cd-963dda1d283b") }
Percona Server for MongoDB server version: v8.0.17-6
WARNING: shell and server versions do not match
switched to db myApp
true
bye
[2026-01-21T13:23:25+0000] checking pitr... backup: backup-nfs-physical-pitr target: 2026-01-21 13:23:00
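The loop above is the harness waiting for PBM to produce a PITR oplog chunk that covers the last write, before it picks that chunk's end as the restore target. A minimal sketch of that logic, assuming only what the trace shows (the get_latest_oplog_chunk_ts name, the pbm status | jq pipeline, and the 30-retry/10-second budget); the wrapper name and error handling are illustrative:

    # End timestamp (unix seconds) of the newest PITR oplog chunk for a cluster.
    get_latest_oplog_chunk_ts() {
        local cluster=$1
        kubectl exec "${cluster}-rs0-0" -c backup-agent -- pbm status -o json \
            | jq '.backups.pitrChunks.pitrChunks | last | .range.end'
    }

    # Poll until the newest chunk extends past the given last-write timestamp.
    wait_for_oplog_chunk() {
        local cluster=$1 last_write=$2
        local retries=0 last_chunk
        last_chunk=$(get_latest_oplog_chunk_ts "$cluster")
        while [[ $last_chunk -le $last_write ]]; do
            if [[ $retries -gt 30 ]]; then
                echo "timed out waiting for a PITR chunk past $last_write" >&2
                return 1
            fi
            echo "Waiting for last oplog chunk ($(TZ=UTC date -d "@$last_chunk" '+%Y-%m-%d %H:%M:%S')) to be greater than last write ($(TZ=UTC date -d "@$last_write" '+%Y-%m-%d %H:%M:%S'))"
            sleep 10
            last_chunk=$(get_latest_oplog_chunk_ts "$cluster")
            retries=$((retries + 1))
        done
    }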
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/conf/pitr.yml
+ yq eval '.metadata.name = "restore-backup-nfs-physical-pitr"'
+ yq eval '.spec.backupName = "backup-nfs-physical-pitr"'
+ yq eval '.spec.pitr.date = "2026-01-21 13:23:00"'
+ kubectl apply -f -
perconaservermongodbrestore.psmdb.percona.com/restore-backup-nfs-physical-pitr created
+ wait_restore backup-nfs-physical-pitr some-name
+ local backup_name=backup-nfs-physical-pitr
+ local cluster_name=some-name
+ local target_state=ready
+ local wait_cluster_consistency=1
+ local wait_time=1780
+ local ok_if_ready=0
Waiting for the psmdb-restore/restore-backup-nfs-physical-pitr object to be created.OK
Waiting psmdb-restore/restore-backup-nfs-physical-pitr to reach state "ready" ......OK after 5 minutes
+ [[ 1 -eq 1 ]]
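For reference, the three yq edits above turn the base manifest into a PerconaServerMongoDBRestore object before piping it to kubectl apply. A sketch of what the applied object plausibly looks like; only metadata.name, spec.backupName, and spec.pitr.date are taken from the trace, while apiVersion and the remaining spec fields come from conf/pitr.yml and are assumptions here:

    # Hypothetical rendering of the restore object applied above; apiVersion,
    # clusterName, and pitr.type are assumptions, the other fields are from the trace.
    kubectl apply -f - <<'EOF'
    apiVersion: psmdb.percona.com/v1
    kind: PerconaServerMongoDBRestore
    metadata:
      name: restore-backup-nfs-physical-pitr
    spec:
      clusterName: some-name
      backupName: backup-nfs-physical-pitr
      pitr:
        type: date
        date: "2026-01-21 13:23:00"
    EOF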
+ wait_cluster_consistency some-name
+ local cluster_name=some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readyness'
waiting for cluster readyness
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ initializing == \r\e\a\d\y ]]
.+ sleep 10
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ error == \r\e\a\d\y ]]
.+ sleep 10
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ error == \r\e\a\d\y ]]
.+ sleep 10
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ error == \r\e\a\d\y ]]
.+ sleep 10
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ initializing == \r\e\a\d\y ]]
.+ sleep 10
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ initializing == \r\e\a\d\y ]]
.+ sleep 10
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
+ [[ ready == \r\e\a\d\y ]]
+ echo .OK
.OK
+ compare_mongo_cmd find myApp:myPass@some-name-rs0.demand-backup-fs-20300 -5th .svc.cluster.local myApp test
+ local command=find
+ local postfix=-5th
+ local database=myApp
+ local collection=test
+ local 'full_command=db.test.find()'
[2026-01-21T13:30:21+0000] running db.test.find() in myApp
+ run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0.demand-backup-fs-20300 mongodb .svc.cluster.local ''
+ grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Started a new thread for the timer service'
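The dotted "waiting for cluster readyness" output above comes from polling .status.state on the psmdb object; the transient error and initializing states right after a physical PITR restore are expected while replica set members rejoin. A minimal sketch of such a poll, assuming the 32-attempt/10-second budget visible in the trace (function internals are illustrative):

    wait_cluster_consistency() {
        local cluster=$1 retry=0 state
        sleep 7  # give the operator time to update .status.state after the restore
        while true; do
            state=$(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}')
            # only the terminal "ready" state matters; "error"/"initializing"
            # are tolerated as transient and simply retried
            [[ $state == "ready" ]] && { echo .OK; return 0; }
            (( ++retry >= 32 )) && { echo "cluster $cluster never became ready" >&2; return 1; }
            echo -n .
            sleep 10
        done
    }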
+ /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ local client_container=psmdb-client-696897d69b-s95hn
+ kubectl exec psmdb-client-696897d69b-s95hn -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0.demand-backup-fs-20300.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/demand-backup-fs/compare/find-5th.json /tmp/tmp.3Kihw2RMl4/find-5th
+ destroy demand-backup-fs-20300
+ local namespace=demand-backup-fs-20300
+ local ignore_logs=true
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ delete_backups
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl get psmdb-backup --no-headers
++ wc -l
+ '[' 4 '!=' 0 ']'
+ kubectl get psmdb-backup
NAME                       CLUSTER     STORAGE   DESTINATION                     TYPE       SIZE       STATUS   COMPLETED   AGE
backup-nfs-logical         some-name   nfs       /mnt/nfs/2026-01-21T13:11:53Z   logical    38.34KB    ready    18m         18m
backup-nfs-logical-pitr    some-name   nfs       /mnt/nfs/2026-01-21T13:12:37Z   logical    44.00KB    ready    17m         17m
backup-nfs-physical        some-name   nfs       /mnt/nfs/2026-01-21T13:15:15Z   physical   1.37MB     ready    15m         15m
backup-nfs-physical-pitr   some-name   nfs       /mnt/nfs/2026-01-21T13:21:39Z   physical   932.27KB   ready    8m39s       8m48s
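Before the diff, the find output is normalized so run-specific values cannot break the comparison: mongo shell noise is filtered out, ObjectIds are stripped, and the numeric namespace suffix is rewritten to -xxx. A sketch using the exact grep and sed expressions from the trace (the wrapper function name and file handling are hypothetical):

    # Normalize raw mongo-shell output, then compare it with the checked-in
    # expectation file; any remaining difference fails the test.
    normalize_and_compare() {
        local raw=$1 expected=$2
        grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Started a new thread for the timer service' "$raw" \
            | sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
            > "${raw}.normalized"
        diff -u "$expected" "${raw}.normalized"
    }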
+ kubectl delete psmdb-backup --all
perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical" deleted from demand-backup-fs-20300 namespace
perconaservermongodbbackup.psmdb.percona.com "backup-nfs-logical-pitr" deleted from demand-backup-fs-20300 namespace
perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical" deleted from demand-backup-fs-20300 namespace
perconaservermongodbbackup.psmdb.percona.com "backup-nfs-physical-pitr" deleted from demand-backup-fs-20300 namespace
+ delete_crd
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-fs-20300 some-name --type=merge -p '{"metadata":{"finalizers":[]}}'
perconaservermongodb.psmdb.percona.com/some-name patched
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ destroy_cert_manager
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.rKBsBrpGHK
+ cat /tmp/tmp.sHrYDoztAG
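Note the kubectl patch ... -n sh calls above: when a CRD is already gone, kubectl get emits no rows, but GNU xargs still runs the command once on empty input, and sh -c with no positional parameters leaves $0 set to the literal string sh, which then lands in the -n namespace flag. The harness papers over the resulting error with ':'. Passing -r (GNU --no-run-if-empty) to xargs would skip the bogus invocation entirely, as in this sketch:

    # With -r, xargs runs nothing when the CR list is empty, so no spurious
    # 'kubectl patch ... -n sh' call is attempted for an absent resource type.
    kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n "$0" "$1" --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'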
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.rKBsBrpGHK + cat /tmp/tmp.sHrYDoztAG Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.rKBsBrpGHK + cat /tmp/tmp.sHrYDoztAG Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
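Note: every object in the cert-manager manifest is already absent, so each delete fails with NotFound; the trace below shows the wrapper returning 1 and the caller deliberately discarding the failure (`+ return 1` followed by `+ true`). A minimal sketch of a quieter, idempotent teardown for the same manifest URL is shown here — the helper name cleanup_cert_manager is hypothetical, not part of the test suite:

# Hypothetical helper (not in the suite): idempotent cert-manager teardown.
# --ignore-not-found=true makes kubectl treat missing objects as success,
# avoiding the NotFound wall above; '|| true' keeps 'set -e' callers alive.
cleanup_cert_manager() {
    kubectl delete -f "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml" \
        --ignore-not-found=true --wait=false || true
}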
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io 
"cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.rKBsBrpGHK /tmp/tmp.sHrYDoztAG + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace demand-backup-fs-20300 + rm -rf /tmp/tmp.3Kihw2RMl4 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.EiowADvnd7 + local LAST_OUT=/tmp/tmp.3PGhJBh5yp ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.0ufFr2ORs6 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.PVeNIRQ46O + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace demand-backup-fs-20300 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator