Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/logs/demand-backup-snapshot-vault.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + vault_name=vault-service ++ deploy_volume_snapshot_class ++ local snapshot_class_file ++ local k8s_env +++ detect_k8s_env +++ local provider=unknown +++ kubectl_bin api-resources +++ grep -q openshift.io ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5HgybT38uk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MJaRiOtXyW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl api-resources +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5HgybT38uk +++ cat /tmp/tmp.MJaRiOtXyW +++ rm /tmp/tmp.5HgybT38uk /tmp/tmp.MJaRiOtXyW +++ return 0 +++ kubectl_bin get nodes -o 'jsonpath={.items[0].metadata.labels}' +++ grep -q eks.amazonaws.com +++ kubectl_bin get nodes -o 'jsonpath={.items[0].metadata.labels}' +++ grep -q cloud.google.com/gke +++ provider=gke +++ echo gke ++ k8s_env=gke ++ case "$k8s_env" in ++ snapshot_class_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/gke-volume-snapshot-class.yaml ++ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/gke-volume-snapshot-class.yaml +++ mktemp ++ local LAST_OUT=/tmp/tmp.pFhWs92ZZl +++ mktemp ++ local LAST_ERR=/tmp/tmp.eWEfXWFhhN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/gke-volume-snapshot-class.yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pFhWs92ZZl volumesnapshotclass.snapshot.storage.k8s.io/gke-snapshot-class unchanged ++ cat /tmp/tmp.eWEfXWFhhN ++ rm /tmp/tmp.pFhWs92ZZl /tmp/tmp.eWEfXWFhhN ++ return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/gke-volume-snapshot-class.yaml + VOLUME_SNAPSHOT_CLASS=gke-snapshot-class + cluster=some-name + create_infra demand-backup-snapshot-vault-10718 + local ns=demand-backup-snapshot-vault-10718 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.6xOhazelQI ++ mktemp + local LAST_ERR=/tmp/tmp.OcvTpv2r0J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6xOhazelQI customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.OcvTpv2r0J + rm /tmp/tmp.6xOhazelQI 
/tmp/tmp.OcvTpv2r0J + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n demand-backup-snapshot-vault-21061 backup-snapshot-vault --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodbbackup.psmdb.percona.com/backup-snapshot-vault patched + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.hy3OG0wLjs ++ mktemp + local LAST_ERR=/tmp/tmp.w7zwJ9TB4r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hy3OG0wLjs + cat /tmp/tmp.w7zwJ9TB4r + rm /tmp/tmp.hy3OG0wLjs /tmp/tmp.w7zwJ9TB4r + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.CMU77gAakB ++ mktemp + local LAST_ERR=/tmp/tmp.GQd11aJSoS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CMU77gAakB + cat /tmp/tmp.GQd11aJSoS + rm /tmp/tmp.CMU77gAakB /tmp/tmp.GQd11aJSoS + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n demand-backup-snapshot-vault-21061 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.egDGvlsGtS ++ mktemp + local LAST_ERR=/tmp/tmp.b2Oozkzblb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.egDGvlsGtS customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.b2Oozkzblb + rm /tmp/tmp.egDGvlsGtS /tmp/tmp.b2Oozkzblb + 
return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.YTSEaQ96AB ++ mktemp + local LAST_ERR=/tmp/tmp.2YmvC8Eq8i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YTSEaQ96AB clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.2YmvC8Eq8i + rm /tmp/tmp.YTSEaQ96AB /tmp/tmp.2YmvC8Eq8i + return 0 + check_crd_for_deletion PR-2256-98b7d85f + local git_tag=PR-2256-98b7d85f ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2256-98b7d85f/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.duWRU0CDlN +++ mktemp ++ local LAST_ERR=/tmp/tmp.wWWuqOJD7F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.duWRU0CDlN ++ cat /tmp/tmp.wWWuqOJD7F Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.duWRU0CDlN ++ cat /tmp/tmp.wWWuqOJD7F Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.duWRU0CDlN ++ cat /tmp/tmp.wWWuqOJD7F Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.duWRU0CDlN ++ cat /tmp/tmp.wWWuqOJD7F Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.duWRU0CDlN /tmp/tmp.wWWuqOJD7F ++ return 1 + [[ '' == Terminating ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration 
++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + awk '{print$1}' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.qfAQ5ysNfw ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.mgZfmFkjkM + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.kbgUEAdra3 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.7sqlVHIaXd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qfAQ5ysNfw + cat /tmp/tmp.mgZfmFkjkM + rm /tmp/tmp.qfAQ5ysNfw /tmp/tmp.mgZfmFkjkM + return 0 namespace "demand-backup-snapshot-vault-21061" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kbgUEAdra3 namespace "psmdb-operator" deleted + cat /tmp/tmp.7sqlVHIaXd + rm /tmp/tmp.kbgUEAdra3 /tmp/tmp.7sqlVHIaXd + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.rXUyWupvfu ++ mktemp + local LAST_ERR=/tmp/tmp.zTHQ6ECgWp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rXUyWupvfu + cat /tmp/tmp.zTHQ6ECgWp + rm /tmp/tmp.rXUyWupvfu /tmp/tmp.zTHQ6ECgWp + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.rw2A2KmfAe ++ mktemp + local LAST_ERR=/tmp/tmp.UbjjiXXdiZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 
'!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rw2A2KmfAe namespace/psmdb-operator created + cat /tmp/tmp.UbjjiXXdiZ + rm /tmp/tmp.rw2A2KmfAe /tmp/tmp.UbjjiXXdiZ + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.EtBJeAiDpu +++ mktemp ++ local LAST_ERR=/tmp/tmp.XxWTnDR3WZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EtBJeAiDpu ++ cat /tmp/tmp.XxWTnDR3WZ ++ rm /tmp/tmp.EtBJeAiDpu /tmp/tmp.XxWTnDR3WZ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-98b7d85f-7-cluster7 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.OabZDyD3YF ++ mktemp + local LAST_ERR=/tmp/tmp.aZk3teN0dB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-98b7d85f-7-cluster7 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OabZDyD3YF Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-98b7d85f-7-cluster7" modified. + cat /tmp/tmp.aZk3teN0dB + rm /tmp/tmp.OabZDyD3YF /tmp/tmp.aZk3teN0dB + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2256-98b7d85f' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2256-98b7d85f ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.i6nAveHNtR ++ mktemp + local LAST_ERR=/tmp/tmp.HNhyhzB8E3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i6nAveHNtR customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.HNhyhzB8E3 + rm /tmp/tmp.i6nAveHNtR /tmp/tmp.HNhyhzB8E3 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.AoihswFue3 ++ mktemp + local LAST_ERR=/tmp/tmp.ZIETrsf3VP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AoihswFue3 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created 
serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.ZIETrsf3VP + rm /tmp/tmp.AoihswFue3 /tmp/tmp.ZIETrsf3VP + return 0 + yq eval $'\n\t\t\t(.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2256-98b7d85f") |\n\t\t\t((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |\n\t\t\t((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JCbpyykaT1 ++ mktemp + local LAST_ERR=/tmp/tmp.HzqQM0om17 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JCbpyykaT1 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.HzqQM0om17 + rm /tmp/tmp.JCbpyykaT1 /tmp/tmp.HzqQM0om17 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.vPGzz3egcY +++ mktemp ++ local LAST_ERR=/tmp/tmp.8B1TAcGvoM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vPGzz3egcY ++ cat /tmp/tmp.8B1TAcGvoM ++ rm /tmp/tmp.vPGzz3egcY /tmp/tmp.8B1TAcGvoM ++ return 0 + wait_operator_pod percona-server-mongodb-operator-7d9c7747d9-cdlh9 + local pod=percona-server-mongodb-operator-7d9c7747d9-cdlh9 + set +o xtrace waiting for pod/percona-server-mongodb-operator-7d9c7747d9-cdlh9 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.QRaHvWOn0c +++ mktemp ++ local LAST_ERR=/tmp/tmp.PVovRPFRTf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QRaHvWOn0c ++ cat /tmp/tmp.PVovRPFRTf ++ rm /tmp/tmp.QRaHvWOn0c /tmp/tmp.PVovRPFRTf ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-7d9c7747d9-cdlh9 ++ mktemp + local LAST_OUT=/tmp/tmp.81nr4GrU0J ++ mktemp + local LAST_ERR=/tmp/tmp.ILmtyUxFA3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-7d9c7747d9-cdlh9 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.81nr4GrU0J + cat /tmp/tmp.ILmtyUxFA3 + rm /tmp/tmp.81nr4GrU0J /tmp/tmp.ILmtyUxFA3 + return 0 2026-04-14T10:36:07.391Z INFO setup Manager starting up {"gitCommit": "98b7d85f6fc9316ce5207960e26f4bbf2b33bfd3", "gitBranch": "PR-2256-98b7d85f", "buildTime": "", "goVersion": "go1.25.9", "os": "linux", "arch": "amd64"} + create_namespace demand-backup-snapshot-vault-10718 + local namespace=demand-backup-snapshot-vault-10718 + local skip_clean_namespace= + [[ 1 == 1 ]] + 
[[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces demand-backup-snapshot-vault-10718' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-snapshot-vault-10718 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-snapshot-vault-10718 --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.XjN6uzjL5D + local LAST_OUT=/tmp/tmp.DMQgkKmNvR ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.ZBJ93Rk9lt + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.4ejKGV5XxD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace demand-backup-snapshot-vault-10718 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DMQgkKmNvR + cat /tmp/tmp.4ejKGV5XxD + rm /tmp/tmp.DMQgkKmNvR /tmp/tmp.4ejKGV5XxD + return 0 + kubectl_bin wait --for=delete namespace demand-backup-snapshot-vault-10718 ++ mktemp + local LAST_OUT=/tmp/tmp.98o9OrgdRn ++ mktemp + local LAST_ERR=/tmp/tmp.pno15ym9Aa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace demand-backup-snapshot-vault-10718 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XjN6uzjL5D + cat /tmp/tmp.ZBJ93Rk9lt + rm /tmp/tmp.XjN6uzjL5D /tmp/tmp.ZBJ93Rk9lt + return 0 error: 
resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.98o9OrgdRn + cat /tmp/tmp.pno15ym9Aa + rm /tmp/tmp.98o9OrgdRn /tmp/tmp.pno15ym9Aa + return 0 + desc 'create namespace demand-backup-snapshot-vault-10718' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-snapshot-vault-10718 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-snapshot-vault-10718 ++ mktemp + local LAST_OUT=/tmp/tmp.Xa9vAY7tZz ++ mktemp + local LAST_ERR=/tmp/tmp.Rmxxn7e9rG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace demand-backup-snapshot-vault-10718 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xa9vAY7tZz namespace/demand-backup-snapshot-vault-10718 created + cat /tmp/tmp.Rmxxn7e9rG + rm /tmp/tmp.Xa9vAY7tZz /tmp/tmp.Rmxxn7e9rG + return 0 + set_kube_ctx demand-backup-snapshot-vault-10718 + local namespace=demand-backup-snapshot-vault-10718 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.BWWhZbBaEQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.foWzmgl4tF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BWWhZbBaEQ ++ cat /tmp/tmp.foWzmgl4tF ++ rm /tmp/tmp.BWWhZbBaEQ /tmp/tmp.foWzmgl4tF ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-98b7d85f-7-cluster7 --namespace=demand-backup-snapshot-vault-10718 ++ mktemp + local LAST_OUT=/tmp/tmp.XM6sqyi28V ++ mktemp + local LAST_ERR=/tmp/tmp.Iz8quR0uf2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-98b7d85f-7-cluster7 --namespace=demand-backup-snapshot-vault-10718 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XM6sqyi28V Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2256-98b7d85f-7-cluster7" modified. 
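
Nearly every command in this trace runs through the suite's kubectl_bin wrapper, which is what produces the repeating mktemp / seq 0 2 / exit_status boilerplate above. A minimal sketch of that pattern, reconstructed from the xtrace output rather than copied from the suite's sources (names and details are illustrative):

# retry kubectl up to three times, capturing stdout/stderr to temp files
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ $exit_status -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))   # the failed 'crd/null' lookups earlier show sleeps of 0, 4 and 8
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

The failing `kubectl get crd/null` sequence earlier in the log shows the other branch of this wrapper: three attempts, the NotFound error cat'ed each time, then return 1.
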
+ cat /tmp/tmp.Iz8quR0uf2 + rm /tmp/tmp.XM6sqyi28V /tmp/tmp.Iz8quR0uf2 + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Tue Apr 14 10:36:39 2026 NAMESPACE: demand-backup-snapshot-vault-10718 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-snapshot-vault-10718.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-snapshot-vault-10718 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-snapshot-vault-10718 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. 
export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-snapshot-vault-10718 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-snapshot-vault-10718 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CqC3Hdcatb +++ mktemp ++ local LAST_ERR=/tmp/tmp.OKnTPqyZ5c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CqC3Hdcatb ++ cat /tmp/tmp.OKnTPqyZ5c ++ rm /tmp/tmp.CqC3Hdcatb /tmp/tmp.OKnTPqyZ5c ++ return 0 + local MINIO_POD=minio-service-6d5f646cdc-8frxx + wait_pod minio-service-6d5f646cdc-8frxx + local pod=minio-service-6d5f646cdc-8frxx + set +o xtrace waiting for pod/minio-service-6d5f646cdc-8frxx to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.demand-backup-snapshot-vault-10718.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.sk23BVbhpT ++ mktemp + local LAST_ERR=/tmp/tmp.QiicE76L5j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- bash -c $'AWS_ACCESS_KEY_ID=some-access-key \t\tAWS_SECRET_ACCESS_KEY=some-secret-key \t\tAWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sk23BVbhpT pod "aws-cli" deleted from demand-backup-snapshot-vault-10718 namespace + cat /tmp/tmp.QiicE76L5j All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
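
The operator-testing bucket is created by a throwaway aws-cli pod pointed at the in-cluster MinIO endpoint; the credentials are the users[0] values passed to helm above. A hypothetical follow-up (not part of the test) to verify the bucket exists, using the same one-shot pod pattern:

# list buckets through the in-cluster MinIO S3 endpoint
kubectl run -i --rm aws-cli --image=docker.io/perconalab/awscli --restart=Never -- \
    bash -c 'AWS_ACCESS_KEY_ID=some-access-key \
        AWS_SECRET_ACCESS_KEY=some-secret-key \
        AWS_DEFAULT_REGION=us-east-1 \
        /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls'

Note the kubectl warning captured above: credentials passed on the command line this way are recorded in container logs, which is acceptable here only because these are throwaway test values.
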
+ rm /tmp/tmp.sk23BVbhpT /tmp/tmp.QiicE76L5j + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.fVmqT2U2Vn ++ mktemp + local LAST_ERR=/tmp/tmp.zVznH5VP2N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fVmqT2U2Vn secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.zVznH5VP2N + rm /tmp/tmp.fVmqT2U2Vn /tmp/tmp.zVznH5VP2N + return 0 + desc 'Setting up Vault' + set +o xtrace ----------------------------------------------------------------------------------- Setting up Vault ----------------------------------------------------------------------------------- + setup_vault + local sa_namespace=demand-backup-snapshot-vault-10718 + '[' -n psmdb-operator ']' + sa_namespace=psmdb-operator + deploy_vault vault-service --set global.enabled=true --set global.tlsDisable=true --set server.standalone.enabled=true + local name=vault-service + [[ 7 -gt 0 ]] + shift + desc 'install Vault vault-service' + set +o xtrace ----------------------------------------------------------------------------------- install Vault vault-service ----------------------------------------------------------------------------------- + helm uninstall vault-service Error: uninstall: Release not loaded: vault-service: release: not found + : + helm repo remove hashicorp "hashicorp" has been removed from your repositories + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" has been added to your repositories + destroy_vault vault-service + local name=vault-service + local vault_ns ++ helm list --all-namespaces --filter vault-service ++ awk '-F ' '{print $2}' ++ tail -n1 ++ sed s/NAMESPACE// + vault_ns= + desc 'destroy vault' + set +o xtrace ----------------------------------------------------------------------------------- destroy vault ----------------------------------------------------------------------------------- ++ kubectl api-resources ++ awk '{print $1}' ++ grep vault + '[' -n '' ']' ++ kubectl get clusterrolebinding -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrolebinding vault-service-agent-injector-binding vault-service-server-binding clusterrolebinding.rbac.authorization.k8s.io "vault-service-agent-injector-binding" deleted clusterrolebinding.rbac.authorization.k8s.io "vault-service-server-binding" deleted ++ kubectl get clusterrole -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrole vault-service-agent-injector-clusterrole clusterrole.rbac.authorization.k8s.io "vault-service-agent-injector-clusterrole" deleted ++ kubectl get 
mutatingwebhookconfiguration -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete mutatingwebhookconfiguration vault-service-agent-injector-cfg mutatingwebhookconfiguration.admissionregistration.k8s.io "vault-service-agent-injector-cfg" deleted + [[ -n '' ]] + retry 10 60 helm install vault-service hashicorp/vault --disable-openapi-validation --set dataStorage.enabled=false --set global.enabled=true --set global.tlsDisable=true --set server.standalone.enabled=true + local max=10 + local delay=60 + shift 2 + local n=1 + helm install vault-service hashicorp/vault --disable-openapi-validation --set dataStorage.enabled=false --set global.enabled=true --set global.tlsDisable=true --set server.standalone.enabled=true NAME: vault-service LAST DEPLOYED: Tue Apr 14 10:37:44 2026 NAMESPACE: demand-backup-snapshot-vault-10718 STATUS: deployed REVISION: 1 NOTES: Thank you for installing HashiCorp Vault! Now that you have deployed Vault, you should look over the docs on using Vault with Kubernetes available here: https://developer.hashicorp.com/vault/docs Your release is named vault-service. To learn more about the release, try: $ helm status vault-service $ helm get manifest vault-service + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running Running + sleep 5 + kubectl_bin exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json ++ mktemp + local LAST_OUT=/tmp/tmp.rkNkpd8ntf ++ mktemp + local LAST_ERR=/tmp/tmp.9zAbvF5dCL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rkNkpd8ntf + cat /tmp/tmp.9zAbvF5dCL + rm /tmp/tmp.rkNkpd8ntf /tmp/tmp.9zAbvF5dCL + return 0 + local unsealKey ++ jq -r '.unseal_keys_b64[]' + unsealKey=142k0Z3CkpgBy6EYnVtB31etXK7UjbetBCD3yjce6rI= + local token ++ jq -r .root_token + token=hvs.TFGRHlDkvYAmO5O0HRP4Va9N + kubectl_bin exec pod/vault-service-0 -- vault operator unseal 142k0Z3CkpgBy6EYnVtB31etXK7UjbetBCD3yjce6rI= ++ mktemp + local LAST_OUT=/tmp/tmp.H5LgItLGAk ++ mktemp + local LAST_ERR=/tmp/tmp.CYo6jU5res + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec pod/vault-service-0 -- vault operator unseal 142k0Z3CkpgBy6EYnVtB31etXK7UjbetBCD3yjce6rI= + exit_status=0 + set -e + '[' 0 '!=' 
0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.H5LgItLGAk
Key             Value
---             -----
Seal Type       shamir
Initialized     true
Sealed          false
Total Shares    1
Threshold       1
Version         1.21.2
Build Date      2026-01-06T08:33:05Z
Storage Type    file
Cluster Name    vault-cluster-a5b7afc0
Cluster ID      f2354587-6523-ccdc-d320-ec93a0ac03d4
HA Enabled      false
+ cat /tmp/tmp.CYo6jU5res
+ rm /tmp/tmp.H5LgItLGAk /tmp/tmp.CYo6jU5res
+ return 0
+ kubectl_bin exec -it pod/vault-service-0 -- sh
++ mktemp
+ local LAST_OUT=/tmp/tmp.M6g5GPhpXb
++ mktemp
+ local LAST_ERR=/tmp/tmp.eqR8FkcX4C
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec -it pod/vault-service-0 -- sh
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.M6g5GPhpXb
Success! You are now authenticated. The token information displayed below is already stored in the token helper. You do NOT need to run "vault login" again. Future Vault requests will automatically use this token.
Key                  Value
---                  -----
token                hvs.TFGRHlDkvYAmO5O0HRP4Va9N
token_accessor       qsS6R1yDMjxUBYO2m9eEM07p
token_duration       ∞
token_renewable      false
token_policies       ["root"]
identity_policies    []
policies             ["root"]
Success! Enabled the kv-v2 secrets engine at: secret/
+ cat /tmp/tmp.eqR8FkcX4C
Unable to use a TTY - input is not a terminal or the right kind of file
+ rm /tmp/tmp.M6g5GPhpXb /tmp/tmp.eqR8FkcX4C
+ return 0
+ kubectl_bin create secret generic vault-secret --from-literal=token=hvs.TFGRHlDkvYAmO5O0HRP4Va9N
++ mktemp
+ local LAST_OUT=/tmp/tmp.fqT7YEYAqw
++ mktemp
+ local LAST_ERR=/tmp/tmp.zDbHc6f1ia
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create secret generic vault-secret --from-literal=token=hvs.TFGRHlDkvYAmO5O0HRP4Va9N
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.fqT7YEYAqw
secret/vault-secret created
+ cat /tmp/tmp.zDbHc6f1ia
+ rm /tmp/tmp.fqT7YEYAqw /tmp/tmp.zDbHc6f1ia
+ return 0
+ sleep 10
+ wait_pod vault-service-0
+ local pod=vault-service-0
+ set +o xtrace
waiting for pod/vault-service-0 to be ready.OK
+ sleep 20
+ kubectl_bin exec vault-service-0 -- vault auth enable kubernetes
++ mktemp
+ local LAST_OUT=/tmp/tmp.a96F66YTc5
++ mktemp
+ local LAST_ERR=/tmp/tmp.Sptx9QMcgS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec vault-service-0 -- vault auth enable kubernetes
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.a96F66YTc5
Success! Enabled kubernetes auth method at: kubernetes/
+ cat /tmp/tmp.Sptx9QMcgS
+ rm /tmp/tmp.a96F66YTc5 /tmp/tmp.Sptx9QMcgS
+ return 0
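
Summarized, the Vault bootstrap just performed is: initialize with a single key share, unseal, log in with the root token, enable the kv-v2 engine at secret/, and enable Kubernetes auth. A condensed sketch of the same flow without the retry wrapper, with both secrets extracted from the init JSON via jq exactly as above (init_json is an illustrative variable name):

# initialize once and keep the JSON so both values can be pulled from it
init_json=$(kubectl exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json)
unsealKey=$(echo "$init_json" | jq -r '.unseal_keys_b64[]')
token=$(echo "$init_json" | jq -r .root_token)
kubectl exec pod/vault-service-0 -- vault operator unseal "$unsealKey"
# the interactive 'kubectl exec -it ... sh' step above amounts to:
#   vault login <root token> && vault secrets enable -path=secret kv-v2
kubectl create secret generic vault-secret --from-literal=token="$token"
kubectl exec vault-service-0 -- vault auth enable kubernetes
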
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/role-binding.yml
+ yq '.metadata.namespace="demand-backup-snapshot-vault-10718"'
+ yq '.subjects[0].namespace="psmdb-operator"'
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.JLYbBy9h7P
++ mktemp
+ local LAST_ERR=/tmp/tmp.dwkOU2tyWH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.JLYbBy9h7P
rolebinding.rbac.authorization.k8s.io/vault-role-binding created
+ cat /tmp/tmp.dwkOU2tyWH
+ rm /tmp/tmp.JLYbBy9h7P /tmp/tmp.dwkOU2tyWH
+ return 0
++ kubectl_bin exec vault-service-0 -- vault token create -policy=operator -format=json
++ jq -r .auth.client_token
+++ mktemp
++ local LAST_OUT=/tmp/tmp.5HNsfOCRID
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bnJctrhDAk
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl exec vault-service-0 -- vault token create -policy=operator -format=json
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.5HNsfOCRID
++ cat /tmp/tmp.bnJctrhDAk
++ rm /tmp/tmp.5HNsfOCRID /tmp/tmp.bnJctrhDAk
++ return 0
+ token=hvs.CAESIPXtAnVPcwURcoVkizI7suFlulPFATj1TKgY6V2uEpY-Gh4KHGh2cy5KWEJFbDV5SWxFMURWWXhvSTdZV3dONUY
+ kubectl_bin create secret generic vault-sync-secret --from-literal=token=hvs.CAESIPXtAnVPcwURcoVkizI7suFlulPFATj1TKgY6V2uEpY-Gh4KHGh2cy5KWEJFbDV5SWxFMURWWXhvSTdZV3dONUY
++ mktemp
+ local LAST_OUT=/tmp/tmp.SwAuCffJs4
++ mktemp
+ local LAST_ERR=/tmp/tmp.PdN1I9kJ4a
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create secret generic vault-sync-secret --from-literal=token=hvs.CAESIPXtAnVPcwURcoVkizI7suFlulPFATj1TKgY6V2uEpY-Gh4KHGh2cy5KWEJFbDV5SWxFMURWWXhvSTdZV3dONUY
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.SwAuCffJs4
secret/vault-sync-secret created
+ cat /tmp/tmp.PdN1I9kJ4a
+ rm /tmp/tmp.SwAuCffJs4 /tmp/tmp.PdN1I9kJ4a
+ return 0
+ kubectl_bin exec vault-service-0 -- sh -c 'vault write auth/kubernetes/config kubernetes_host=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT'
++ mktemp
+ local LAST_OUT=/tmp/tmp.TMSkZFAixM
++ mktemp
+ local LAST_ERR=/tmp/tmp.aD5aRlGtZd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec vault-service-0 -- sh -c 'vault write auth/kubernetes/config kubernetes_host=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.TMSkZFAixM
Success! Data written to: auth/kubernetes/config
+ cat /tmp/tmp.aD5aRlGtZd
+ rm /tmp/tmp.TMSkZFAixM /tmp/tmp.aD5aRlGtZd
+ return 0
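
Rather than handing the root token around, the test mints a token scoped to the operator policy and stores it in the vault-sync-secret Secret, then points Vault's Kubernetes auth at the API server using the service environment variables available inside the vault pod. The same three steps without the retry wrapper:

token=$(kubectl exec vault-service-0 -- vault token create -policy=operator -format=json \
    | jq -r .auth.client_token)
kubectl create secret generic vault-sync-secret --from-literal=token="$token"
# single quotes matter here: $KUBERNETES_SERVICE_HOST/$KUBERNETES_SERVICE_PORT
# must expand inside vault-service-0, not on the machine running kubectl
kubectl exec vault-service-0 -- sh -c \
    'vault write auth/kubernetes/config kubernetes_host=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT'
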
+ kubectl_bin exec vault-service-0 -- sh -c $'vault policy write operator - <
deletion_time    n/a
destroyed        false
version          1
+ cat /tmp/tmp.Ug8wPY9lP7
+ rm /tmp/tmp.VaXYcLkThl /tmp/tmp.Ug8wPY9lP7
+ return 0
+ rm -f /tmp/tmp.PJ008HzxTX /tmp/tmp.MT17Zyx3c8
+ vault_append MONGODB_BACKUP_PASSWORD backup123456#
+ local key=MONGODB_BACKUP_PASSWORD
+ local value=backup123456#
+ local tmp_json
++ mktemp
+ tmp_json=/tmp/tmp.EiFX9Aajxh
+ local new_tmp_json
++ mktemp
+ new_tmp_json=/tmp/tmp.LKCs5u4Udu
+ kubectl_bin exec vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/demand-backup-snapshot-vault-10718/some-name/users'
+ jq .data.data
++ mktemp
+ local LAST_OUT=/tmp/tmp.hDCFpuVOkQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.MZVjvWXQkR
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/demand-backup-snapshot-vault-10718/some-name/users'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hDCFpuVOkQ
+ cat /tmp/tmp.MZVjvWXQkR
+ rm /tmp/tmp.hDCFpuVOkQ /tmp/tmp.MZVjvWXQkR
+ return 0
+ '[' '!' -s /tmp/tmp.EiFX9Aajxh ']'
+ jq --arg key MONGODB_BACKUP_PASSWORD --arg value backup123456# '(. // {}) + {($key): $value}' /tmp/tmp.EiFX9Aajxh
+ kubectl_bin cp /tmp/tmp.LKCs5u4Udu vault-service-0:/tmp/data_new.json
++ mktemp
+ local LAST_OUT=/tmp/tmp.1wCgBiYhDw
++ mktemp
+ local LAST_ERR=/tmp/tmp.yKmoSlcOYp
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl cp /tmp/tmp.LKCs5u4Udu vault-service-0:/tmp/data_new.json
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1wCgBiYhDw
+ cat /tmp/tmp.yKmoSlcOYp
+ rm /tmp/tmp.1wCgBiYhDw /tmp/tmp.yKmoSlcOYp
+ return 0
+ kubectl_bin exec vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/demand-backup-snapshot-vault-10718/some-name/users @"/tmp/data_new.json"'
++ mktemp
+ local LAST_OUT=/tmp/tmp.qCdLacuSU8
++ mktemp
+ local LAST_ERR=/tmp/tmp.kb1HZAIFRy
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/demand-backup-snapshot-vault-10718/some-name/users @"/tmp/data_new.json"'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qCdLacuSU8
================================ Secret Path ================================
secret/data/psmdb/operator/demand-backup-snapshot-vault-10718/some-name/users

======= Metadata =======
Key                Value
---                -----
created_time       2026-04-14T10:39:35.401839872Z
custom_metadata
deletion_time      n/a
destroyed          false
version            2
+ cat /tmp/tmp.kb1HZAIFRy
+ rm /tmp/tmp.qCdLacuSU8 /tmp/tmp.kb1HZAIFRy
+ return 0
+ rm -f /tmp/tmp.EiFX9Aajxh /tmp/tmp.LKCs5u4Udu
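
vault_append is a read-modify-write against a kv-v2 path: fetch the current secret document, merge in one key with jq, and write the whole document back, since kv-v2 put replaces the full secret rather than patching it (the jump from version 1 to version 2 in the metadata above is that rewrite). A minimal sketch of the helper under that reading; VAULT_PATH stands in for the psmdb/operator/<namespace>/<cluster>/users path used here:

vault_append() {
    local key=$1 value=$2
    local cur new
    cur=$(mktemp)
    new=$(mktemp)
    # current secret body (empty on first use of the path)
    kubectl exec vault-service-0 -- sh -c \
        "vault kv get -format=json -mount=secret $VAULT_PATH" | jq .data.data >"$cur"
    # merge the new key into whatever is there; '. // {}' tolerates an empty read
    jq --arg key "$key" --arg value "$value" '(. // {}) + {($key): $value}' "$cur" >"$new"
    # vault reads @file from its own filesystem, hence the kubectl cp into the pod
    kubectl cp "$new" vault-service-0:/tmp/data_new.json
    kubectl exec vault-service-0 -- sh -c \
        "vault kv put -mount=secret $VAULT_PATH @/tmp/data_new.json"
    rm -f "$cur" "$new"
}
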
+ desc 'Deploying PSMDB cluster with Vault and snapshot backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Deploying PSMDB cluster with Vault and snapshot backup
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.dlrKD3Ocyb
++ mktemp
+ local LAST_ERR=/tmp/tmp.MAFcU4QmtD
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.dlrKD3Ocyb
secret/some-users created
+ cat /tmp/tmp.MAFcU4QmtD
+ rm /tmp/tmp.dlrKD3Ocyb /tmp/tmp.MAFcU4QmtD
+ return 0
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/some-name.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/some-name.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/some-name.yml
+ yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"'
++ mktemp
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2256-98b7d85f"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"'
+ /usr/sbin/sed -e s/NAME_SPACE/demand-backup-snapshot-vault-10718/g
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ local LAST_OUT=/tmp/tmp.Lz0S6pYoQ1
++ mktemp
+ local LAST_ERR=/tmp/tmp.ok3tdI99mg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Lz0S6pYoQ1
perconaservermongodb.psmdb.percona.com/some-name created
+ cat /tmp/tmp.ok3tdI99mg
+ rm /tmp/tmp.Lz0S6pYoQ1 /tmp/tmp.ok3tdI99mg
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/client_with_tls.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.3tpxKSIqdP
++ mktemp
+ local LAST_ERR=/tmp/tmp.E82ZLin04o
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/conf/client_with_tls.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.3tpxKSIqdP
deployment.apps/psmdb-client created
+ cat /tmp/tmp.E82ZLin04o
+ rm /tmp/tmp.3tpxKSIqdP /tmp/tmp.E82ZLin04o
+ return 0
+ echo 'check if all pods started'
check if all pods started
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready................OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready..............OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.AXJfx3xZFz
+++ mktemp
++ local LAST_ERR=/tmp/tmp.eHBZlvi2WU
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.AXJfx3xZFz
++ cat /tmp/tmp.eHBZlvi2WU
++ rm /tmp/tmp.AXJfx3xZFz /tmp/tmp.eHBZlvi2WU
++
return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.......OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qYxwhotRaO +++ mktemp ++ local LAST_ERR=/tmp/tmp.UqIAJgRmxJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qYxwhotRaO ++ cat /tmp/tmp.UqIAJgRmxJ ++ rm /tmp/tmp.qYxwhotRaO /tmp/tmp.UqIAJgRmxJ ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lqOpwlApXb +++ mktemp ++ local LAST_ERR=/tmp/tmp.d6N6DDmZkF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lqOpwlApXb ++ cat /tmp/tmp.d6N6DDmZkF ++ rm /tmp/tmp.lqOpwlApXb /tmp/tmp.d6N6DDmZkF ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness................ + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.65sfEh0ORL +++ mktemp ++ local LAST_ERR=/tmp/tmp.iT1lyVNRx7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.65sfEh0ORL ++ cat /tmp/tmp.iT1lyVNRx7 ++ rm /tmp/tmp.65sfEh0ORL /tmp/tmp.iT1lyVNRx7 ++ return 0 + [[ ready == ready ]] + echo .OK .OK + sleep 60 + wait_for_pbm_operations some-name + local cluster=some-name + set +o xtrace waiting for PBM operation to finish.OK + desc 'Writing test data' + set +o xtrace ----------------------------------------------------------------------------------- Writing test data ----------------------------------------------------------------------------------- + run_mongo_tls 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-vault-10718 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-vault-10718 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7MlXk57cJz +++ mktemp ++ local LAST_ERR=/tmp/tmp.e9E0JFMeqo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7MlXk57cJz ++ cat /tmp/tmp.e9E0JFMeqo ++ rm /tmp/tmp.7MlXk57cJz /tmp/tmp.e9E0JFMeqo ++ return 0 + local client_container=psmdb-client-5649fbb65f-t48kz + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: 
'{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-vault-10718 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.Uv1Uhh9VmV ++ mktemp + local LAST_ERR=/tmp/tmp.xAed3Rtro8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Uv1Uhh9VmV Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017,some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-14T10:43:22.466Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("95cfd34b-2dda-4110-9ebc-d7762ea2ac4d") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.xAed3Rtro8 + rm /tmp/tmp.Uv1Uhh9VmV /tmp/tmp.xAed3Rtro8 + return 0 + sleep 1 + run_mongo_tls 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wDL2BgWTKw +++ mktemp ++ local LAST_ERR=/tmp/tmp.GVP8RwNR32 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wDL2BgWTKw ++ cat /tmp/tmp.GVP8RwNR32 ++ rm /tmp/tmp.wDL2BgWTKw /tmp/tmp.GVP8RwNR32 ++ return 0 + local client_container=psmdb-client-5649fbb65f-t48kz + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.prskxxLnbw ++ mktemp + local LAST_ERR=/tmp/tmp.lvbpivCnof + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.prskxxLnbw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-14T10:43:25.894Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("b8523016-0e25-4db8-bcbe-fb9250dfae64") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.lvbpivCnof + rm /tmp/tmp.prskxxLnbw /tmp/tmp.lvbpivCnof + return 0 + sleep 5 + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-14T10:43:31+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MoO47kme3t +++ mktemp ++ local LAST_ERR=/tmp/tmp.jhrxA4oZOd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MoO47kme3t ++ cat /tmp/tmp.jhrxA4oZOd ++ rm /tmp/tmp.MoO47kme3t /tmp/tmp.jhrxA4oZOd ++ return 0 + local 
client_container=psmdb-client-5649fbb65f-t48kz + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.I4fcBhGDfs ++ mktemp + local LAST_ERR=/tmp/tmp.slUusJOEMG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I4fcBhGDfs + cat /tmp/tmp.slUusJOEMG + rm /tmp/tmp.I4fcBhGDfs /tmp/tmp.slUusJOEMG + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/compare/find.json /tmp/tmp.Vwcrmn8qQg/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-14T10:43:34+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XscYjZ76yZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.FdG4WPqJMa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XscYjZ76yZ ++ cat /tmp/tmp.FdG4WPqJMa ++ rm /tmp/tmp.XscYjZ76yZ /tmp/tmp.FdG4WPqJMa ++ return 0 + local client_container=psmdb-client-5649fbb65f-t48kz + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ 
myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.sVANs7F6ze ++ mktemp + local LAST_ERR=/tmp/tmp.8nqeBnhRt3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sVANs7F6ze + cat /tmp/tmp.8nqeBnhRt3 + rm /tmp/tmp.sVANs7F6ze /tmp/tmp.8nqeBnhRt3 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/compare/find.json /tmp/tmp.Vwcrmn8qQg/find + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718 '' '' '' '' '' true + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718 + local postfix= + local suffix= + local database=myApp + local collection=test + local sort= + local tls=true + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-14T10:43:38+0000] running db.test.find() in myApp + [[ true == true ]] + mongo_command=run_mongo_tls + run_mongo_tls 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718 + local driver=mongodb + local suffix=.svc.cluster.local + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PAvel6Lcyv +++ mktemp ++ local LAST_ERR=/tmp/tmp.PxmDHYMTWO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PAvel6Lcyv ++ cat /tmp/tmp.PxmDHYMTWO ++ rm /tmp/tmp.PAvel6Lcyv /tmp/tmp.PxmDHYMTWO ++ return 0 + local client_container=psmdb-client-5649fbb65f-t48kz + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.FGBZMA9dpG ++ mktemp + local LAST_ERR=/tmp/tmp.mTsFbKxPGp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FGBZMA9dpG + cat /tmp/tmp.mTsFbKxPGp + rm /tmp/tmp.FGBZMA9dpG /tmp/tmp.mTsFbKxPGp + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/compare/find.json /tmp/tmp.Vwcrmn8qQg/find + desc 'Running snapshot backup' + set +o xtrace ----------------------------------------------------------------------------------- Running snapshot backup ----------------------------------------------------------------------------------- + backup_name=backup-snapshot-vault + run_snapshot_backup backup-snapshot-vault + local backup_name=backup-snapshot-vault + log 'running snapshot backup backup-snapshot-vault' + set +o xtrace [2026-04-14T10:43:42+0000] running snapshot backup backup-snapshot-vault + yq eval '.metadata.name = "backup-snapshot-vault" | .spec.volumeSnapshotClass = "gke-snapshot-class"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/backup.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9qVDJebMlH ++ mktemp + local LAST_ERR=/tmp/tmp.egYUS6A74b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9qVDJebMlH perconaservermongodbbackup.psmdb.percona.com/backup-snapshot-vault created + cat /tmp/tmp.egYUS6A74b + rm /tmp/tmp.9qVDJebMlH /tmp/tmp.egYUS6A74b + return 0 + wait_backup backup-snapshot-vault + local backup_name=backup-snapshot-vault + local target_state=ready + set +o xtrace waiting for backup-snapshot-vault to reach ready state........................OK + desc 'Drop collection and restore from snapshot' + set +o xtrace ----------------------------------------------------------------------------------- Drop collection and restore from snapshot ----------------------------------------------------------------------------------- + run_mongo_tls 'use myApp\n db.test.drop()' myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718 + local 'command=use myApp\n db.test.drop()' + local uri=myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4YGGYqeF0I +++ mktemp ++ local LAST_ERR=/tmp/tmp.TCTR6kFxRm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4YGGYqeF0I ++ cat /tmp/tmp.TCTR6kFxRm ++ rm /tmp/tmp.4YGGYqeF0I 
/tmp/tmp.TCTR6kFxRm ++ return 0 + local client_container=psmdb-client-5649fbb65f-t48kz + local mongo_flag= + local port=27017 ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 + [[ myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' ++ mktemp + local LAST_OUT=/tmp/tmp.jrTmYCXIug ++ mktemp + local LAST_ERR=/tmp/tmp.BekIz61SHr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-5649fbb65f-t48kz -- bash -c 'printf '\''use myApp\n db.test.drop()\n'\'' | mongo mongodb+srv://myApp:myPass@some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?replicaSet=rs0 --tls --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsCertificateKeyFile /tmp/tls.pem --tlsAllowInvalidHostnames ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jrTmYCXIug Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017,some-name-rs0-1.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017,some-name-rs0-2.some-name-rs0.demand-backup-snapshot-vault-10718.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=true {"t":{"$date":"2026-04-14T10:44:41.123Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("245f5994-8253-4101-9568-cb46ba537b1d") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp true bye + cat /tmp/tmp.BekIz61SHr + rm /tmp/tmp.jrTmYCXIug /tmp/tmp.BekIz61SHr + return 0 + run_restore backup-snapshot-vault + local backup_name=backup-snapshot-vault + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/restore.yml + log 'running restore restore-backup-snapshot-vault' + set +o xtrace [2026-04-14T10:44:41+0000] running restore restore-backup-snapshot-vault + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2256/e2e-tests/demand-backup-snapshot-vault/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-snapshot-vault/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-snapshot-vault/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.xHMy8R9PKi ++ mktemp + local LAST_ERR=/tmp/tmp.qaldukv1Ir + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xHMy8R9PKi perconaservermongodbrestore.psmdb.percona.com/restore-backup-snapshot-vault created + cat /tmp/tmp.qaldukv1Ir + rm /tmp/tmp.xHMy8R9PKi /tmp/tmp.qaldukv1Ir + return 0 + run_snapshot_recovery_check backup-snapshot-vault + local backup_name=backup-snapshot-vault + wait_restore backup-snapshot-vault some-name ready 0 3000 + local backup_name=backup-snapshot-vault + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=0 + local wait_time=3000 + local ok_if_ready=0 + 
set +o xtrace
Waiting for the psmdb-restore/restore-backup-snapshot-vault object to be created.OK
Waiting psmdb-restore/restore-backup-snapshot-vault to reach state "ready" ...
2026-04-14T10:47:18.728Z DEBUG checking if restore is allowed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-snapshot-vault","namespace":"demand-backup-snapshot-vault-10718"}, "namespace": "demand-backup-snapshot-vault-10718", "name": "restore-backup-snapshot-vault", "reconcileID": "28240f1c-dfb0-4fa6-9cc2-b4982fdba38c", "cluster": "some-name", "namespace": "demand-backup-snapshot-vault-10718"}
2026-04-14T10:47:19.445Z ERROR failed to make restore {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-snapshot-vault","namespace":"demand-backup-snapshot-vault-10718"}, "namespace": "demand-backup-snapshot-vault-10718", "name": "restore-backup-snapshot-vault", "reconcileID": "28240f1c-dfb0-4fa6-9cc2-b4982fdba38c", "restore": "restore-backup-snapshot-vault", "backup": "backup-snapshot-vault", "error": "describe restore stderr: stdout: : unable to upgrade connection: container not found (\"mongod\")", "errorVerbose": "unable to upgrade connection: container not found (\"mongod\")\ndescribe restore stderr: stdout: \ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcileSnapshotRequested.func2\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/snapshots.go:181\nk8s.io/client-go/util/retry.OnError.func1\n\t/go/pkg/mod/k8s.io/client-go@v0.35.2/util/retry/util.go:51\nk8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtection\n\t/go/pkg/mod/k8s.io/apimachinery@v0.35.2/pkg/util/wait/wait.go:150\nk8s.io/apimachinery/pkg/util/wait.ExponentialBackoff\n\t/go/pkg/mod/k8s.io/apimachinery@v0.35.2/pkg/util/wait/backoff.go:477\nk8s.io/client-go/util/retry.OnError\n\t/go/pkg/mod/k8s.io/client-go@v0.35.2/util/retry/util.go:50\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcileSnapshotRequested\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/snapshots.go:160\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcileExternalSnapshotRestore\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/snapshots.go:46\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/psmdb_restore_controller.go:266\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"}
2026-04-14T10:47:19.446Z INFO Restore state changed {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-snapshot-vault","namespace":"demand-backup-snapshot-vault-10718"}, "namespace": "demand-backup-snapshot-vault-10718", "name": "restore-backup-snapshot-vault", "reconcileID": "28240f1c-dfb0-4fa6-9cc2-b4982fdba38c", "previous": "requested", "current": "error"}
2026-04-14T10:47:20.006Z INFO Warning: Reconciler returned both a result with either RequeueAfter or Requeue set and a non-nil error. RequeueAfter and Requeue will always be ignored if the error is non-nil. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-snapshot-vault","namespace":"demand-backup-snapshot-vault-10718"}, "namespace": "demand-backup-snapshot-vault-10718", "name": "restore-backup-snapshot-vault", "reconcileID": "28240f1c-dfb0-4fa6-9cc2-b4982fdba38c"}
2026-04-14T10:47:20.006Z ERROR Reconciler error {"controller": "psmdbrestore-controller", "controllerGroup": "psmdb.percona.com", "controllerKind": "PerconaServerMongoDBRestore", "PerconaServerMongoDBRestore": {"name":"restore-backup-snapshot-vault","namespace":"demand-backup-snapshot-vault-10718"}, "namespace": "demand-backup-snapshot-vault-10718", "name": "restore-backup-snapshot-vault", "reconcileID": "28240f1c-dfb0-4fa6-9cc2-b4982fdba38c", "error": "reconcile external snapshot restore: describe restore stderr: stdout: : unable to upgrade connection: container not found (\"mongod\")", "errorVerbose": "unable to upgrade connection: container not found (\"mongod\")\ndescribe restore stderr: stdout: \ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcileSnapshotRequested.func2\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/snapshots.go:181\nk8s.io/client-go/util/retry.OnError.func1\n\t/go/pkg/mod/k8s.io/client-go@v0.35.2/util/retry/util.go:51\nk8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtection\n\t/go/pkg/mod/k8s.io/apimachinery@v0.35.2/pkg/util/wait/wait.go:150\nk8s.io/apimachinery/pkg/util/wait.ExponentialBackoff\n\t/go/pkg/mod/k8s.io/apimachinery@v0.35.2/pkg/util/wait/backoff.go:477\nk8s.io/client-go/util/retry.OnError\n\t/go/pkg/mod/k8s.io/client-go@v0.35.2/util/retry/util.go:50\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcileSnapshotRequested\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/snapshots.go:160\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).reconcileExternalSnapshotRestore\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/snapshots.go:46\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/psmdb_restore_controller.go:266\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nreconcile external snapshot restore\ngithub.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore.(*ReconcilePerconaServerMongoDBRestore).Reconcile\n\t/go/src/github.com/percona/percona-server-mongodb-operator/pkg/controller/perconaservermongodbrestore/psmdb_restore_controller.go:268\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"}
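-----------------------------------------------------------------------------------
editor's note: diagnosing the 'container not found ("mongod")' failure above
-----------------------------------------------------------------------------------
The stack trace shows the operator shelling out from reconcileSnapshotRequested (snapshots.go:181, wrapped in client-go retry.OnError) to describe the PBM restore inside a container named "mongod", and the kubelet rejected the exec because no such container was running in the target pod at that moment — consistent with replset pods being recreated while the snapshot restore is in progress. Below is a minimal check of the container state at failure time; the namespace and pod names are taken from this log, and the loop itself is an illustrative addition, not part of e2e-tests:

# hypothetical diagnostic, not part of the test suite
ns=demand-backup-snapshot-vault-10718
for pod in some-name-rs0-0 some-name-rs0-1 some-name-rs0-2; do
	echo "=== ${pod} ==="
	# prints the state of the mongod container if the pod exists; empty output
	# means the pod or container is gone, matching the operator error above
	kubectl -n "${ns}" get pod "${pod}" \
		-o 'jsonpath={.status.containerStatuses[?(@.name=="mongod")].state}' || true
	echo
done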
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"psmdb.percona.com/v1","kind":"PerconaServerMongoDBRestore","metadata":{"annotations":{},"name":"restore-backup-snapshot-vault","namespace":"demand-backup-snapshot-vault-10718"},"spec":{"backupName":"backup-snapshot-vault","clusterName":"some-name"}}
  creationTimestamp: "2026-04-14T10:44:44Z"
  generation: 1
  name: restore-backup-snapshot-vault
  namespace: demand-backup-snapshot-vault-10718
  resourceVersion: "1776163639460447018"
  uid: 0710031b-afa5-4e8d-a5f0-31468f205876
spec:
  backupName: backup-snapshot-vault
  clusterName: some-name
status:
  error: 'describe restore stderr: stdout: : unable to upgrade connection: container not found ("mongod")'
  pbmName: "2026-04-14T10:46:36.729676481Z"
  state: error
Name:          restore-backup-snapshot-vault
Namespace:     demand-backup-snapshot-vault-10718
Labels:
Annotations:
API Version:   psmdb.percona.com/v1
Kind:          PerconaServerMongoDBRestore
Metadata:
  Creation Timestamp:  2026-04-14T10:44:44Z
  Generation:          1
  Resource Version:    1776163639460447018
  UID:                 0710031b-afa5-4e8d-a5f0-31468f205876
Spec:
  Backup Name:   backup-snapshot-vault
  Cluster Name:  some-name
Status:
  Error:     describe restore stderr: stdout: : unable to upgrade connection: container not found ("mongod")
  Pbm Name:  2026-04-14T10:46:36.729676481Z
  State:     error
Events:
Restore object restore-backup-snapshot-vault is in error state after 3 minutes. Something went wrong with the operator or the Kubernetes cluster.
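-----------------------------------------------------------------------------------
editor's note: follow-up sketches (added; not part of the original run)
-----------------------------------------------------------------------------------
Once wait_restore sees state "error" it only dumps the object above and aborts. A short sketch of the manual follow-up; the namespace, the object names, and the psmdb-restore short name are all taken from this log, while re-creating the CR as a retry is a common approach rather than something this harness does:

# hypothetical follow-up, assuming names from this log
ns=demand-backup-snapshot-vault-10718
# confirm the recorded state and error on the restore object
kubectl -n "${ns}" get psmdb-restore restore-backup-snapshot-vault \
	-o 'jsonpath={.status.state}{"\n"}{.status.error}{"\n"}'
# one way to retry: delete the failed CR and re-create it from the same template,
# using the same sed substitutions run_restore applied earlier in this log
kubectl -n "${ns}" delete psmdb-restore restore-backup-snapshot-vault
sed -e 's/name:/name: restore-backup-snapshot-vault/' \
	-e 's/backupName:/backupName: backup-snapshot-vault/' \
	e2e-tests/demand-backup-snapshot-vault/conf/restore.yml \
	| kubectl -n "${ns}" apply -f -

For readers skimming this trace, the mktemp/LAST_OUT/LAST_ERR/seq 0 2 pattern that repeats throughout is a retry wrapper around kubectl. The following is a readable approximation inferred purely from the trace; the real helper lives in e2e-tests/functions and may differ (the '-n 1' operand in the trace suggests a retry flag that is always set in this run, and the sleep on failure is a guess, since only the success path appears above):

# approximate reconstruction of the kubectl_bin wrapper seen in this trace
kubectl_bin() {
	local LAST_OUT LAST_ERR
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	local exit_status=0
	local timeout=4
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
		exit_status=$?
		set -e
		# matches the traced test: '[' 0 '!=' 0 -a -n 1 ']' -> retry only on failure
		if [ "${exit_status}" != 0 -a -n "${RETRY:-1}" ]; then
			sleep "${timeout}" # inferred: back off before the next attempt
			continue
		fi
		break
	done
	cat "${LAST_OUT}"
	cat "${LAST_ERR}" >&2
	rm "${LAST_OUT}" "${LAST_ERR}"
	return "${exit_status}"
}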