Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/logs/users-vault.log Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + vault_name=users-vault-service + main + psmdb=some-name + cluster=some-name-rs0 + create_infra users-vault-7177 + local ns=users-vault-7177 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.d7SL9Qb4T2 ++ mktemp + local LAST_ERR=/tmp/tmp.PDt3MpEC6r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d7SL9Qb4T2 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.PDt3MpEC6r + rm /tmp/tmp.d7SL9Qb4T2 /tmp/tmp.PDt3MpEC6r + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.5DGrUfxprU ++ mktemp + local LAST_ERR=/tmp/tmp.Eb8K7Lhr86 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5DGrUfxprU + cat /tmp/tmp.Eb8K7Lhr86 + rm /tmp/tmp.5DGrUfxprU /tmp/tmp.Eb8K7Lhr86 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh 
--type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ufe2qCYg7c ++ mktemp + local LAST_ERR=/tmp/tmp.3W2oEqi963 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ufe2qCYg7c + cat /tmp/tmp.3W2oEqi963 + rm /tmp/tmp.ufe2qCYg7c /tmp/tmp.3W2oEqi963 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.yxANbjOiU5 ++ mktemp + local LAST_ERR=/tmp/tmp.PJak9Kf0L0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yxANbjOiU5 + cat /tmp/tmp.PJak9Kf0L0 + rm /tmp/tmp.yxANbjOiU5 /tmp/tmp.PJak9Kf0L0 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.PP6H8KKGCb ++ mktemp + local LAST_ERR=/tmp/tmp.XwVNWe7o2d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PP6H8KKGCb clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.XwVNWe7o2d + rm /tmp/tmp.PP6H8KKGCb /tmp/tmp.XwVNWe7o2d + return 0 + check_crd_for_deletion PR-2205-44b3f99f + local git_tag=PR-2205-44b3f99f ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2205-44b3f99f/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fw7Cbwms3o +++ mktemp ++ local LAST_ERR=/tmp/tmp.hSqYVx4WT8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.fw7Cbwms3o ++ cat /tmp/tmp.hSqYVx4WT8 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" 
not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.fw7Cbwms3o ++ cat /tmp/tmp.hSqYVx4WT8 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.fw7Cbwms3o ++ cat /tmp/tmp.hSqYVx4WT8 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.fw7Cbwms3o ++ cat /tmp/tmp.hSqYVx4WT8 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.fw7Cbwms3o /tmp/tmp.hSqYVx4WT8 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator+ xargs kubectl delete ns ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.tR9dsiuRja ++ mktemp + local LAST_OUT=/tmp/tmp.QFlioztrn6 ++ mktemp + local 
LAST_ERR=/tmp/tmp.SZSAxI2GaE + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.OOBLZRKiB8 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tR9dsiuRja + cat /tmp/tmp.SZSAxI2GaE + rm /tmp/tmp.tR9dsiuRja /tmp/tmp.SZSAxI2GaE + return 0 namespace "users-vault-3560" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QFlioztrn6 namespace "psmdb-operator" deleted + cat /tmp/tmp.OOBLZRKiB8 + rm /tmp/tmp.QFlioztrn6 /tmp/tmp.OOBLZRKiB8 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.u7IQJN4qZd ++ mktemp + local LAST_ERR=/tmp/tmp.Zq59vnf6kA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.u7IQJN4qZd + cat /tmp/tmp.Zq59vnf6kA + rm /tmp/tmp.u7IQJN4qZd /tmp/tmp.Zq59vnf6kA + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.sJc2YSgJem ++ mktemp + local LAST_ERR=/tmp/tmp.iU2ydpuuQw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sJc2YSgJem namespace/psmdb-operator created + cat /tmp/tmp.iU2ydpuuQw + rm /tmp/tmp.sJc2YSgJem /tmp/tmp.iU2ydpuuQw + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.f4yEcozxyT +++ mktemp ++ local LAST_ERR=/tmp/tmp.QqGIJ10FnE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f4yEcozxyT ++ cat /tmp/tmp.QqGIJ10FnE ++ rm /tmp/tmp.f4yEcozxyT /tmp/tmp.QqGIJ10FnE ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.aQ3Ev7N4nU ++ mktemp + local LAST_ERR=/tmp/tmp.kRTrxYkRSj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aQ3Ev7N4nU Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster2" modified. 
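Every kubectl_bin call in this trace expands into the same boilerplate: two mktemp files for stdout/stderr, up to three attempts with a growing back-off (sleep 0, 4, 8), then both captured streams are printed and the temp files removed. A minimal sketch of that wrapper, reconstructed from the trace itself rather than copied from the real e2e-tests helper (names and details may differ):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"    # run the real command
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break                                  # success: stop retrying
        fi
        sleep $((timeout * i))                     # back off: 0s, 4s, 8s
    done
    cat "$LAST_OUT"                                # surface captured stdout
    cat "$LAST_ERR"                                # and stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}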
+ cat /tmp/tmp.kRTrxYkRSj + rm /tmp/tmp.aQ3Ev7N4nU /tmp/tmp.kRTrxYkRSj + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/users-vault/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.A5yrciDMQ8 ++ mktemp + local LAST_ERR=/tmp/tmp.N93r3RI5SU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.A5yrciDMQ8 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.N93r3RI5SU + rm /tmp/tmp.A5yrciDMQ8 /tmp/tmp.N93r3RI5SU + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XuAIfpGOxm ++ mktemp + local LAST_ERR=/tmp/tmp.AWwUoOkbt1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XuAIfpGOxm clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.AWwUoOkbt1 + rm /tmp/tmp.XuAIfpGOxm /tmp/tmp.AWwUoOkbt1 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PJOKPpOkim ++ mktemp + local LAST_ERR=/tmp/tmp.KTsz3EcJu8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PJOKPpOkim deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.KTsz3EcJu8 + rm /tmp/tmp.PJOKPpOkim /tmp/tmp.KTsz3EcJu8 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.o9MErS0SYW +++ mktemp ++ local LAST_ERR=/tmp/tmp.LyLf6CW3lY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o9MErS0SYW ++ cat /tmp/tmp.LyLf6CW3lY ++ rm /tmp/tmp.o9MErS0SYW /tmp/tmp.LyLf6CW3lY ++ return 0 + wait_operator_pod percona-server-mongodb-operator-54d9d7959c-d274h + local pod=percona-server-mongodb-operator-54d9d7959c-d274h + set +o xtrace waiting for pod/percona-server-mongodb-operator-54d9d7959c-d274h to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.YcHYopz2kG +++ mktemp ++ local LAST_ERR=/tmp/tmp.VyERaM8I5L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YcHYopz2kG ++ cat /tmp/tmp.VyERaM8I5L ++ rm /tmp/tmp.YcHYopz2kG /tmp/tmp.VyERaM8I5L ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-54d9d7959c-d274h ++ mktemp + local LAST_OUT=/tmp/tmp.Mxe4XWPKfg ++ mktemp + local LAST_ERR=/tmp/tmp.8LlA9fXTTW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-54d9d7959c-d274h + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Mxe4XWPKfg + cat /tmp/tmp.8LlA9fXTTW + rm /tmp/tmp.Mxe4XWPKfg /tmp/tmp.8LlA9fXTTW + return 0 2026-01-21T14:19:15.374Z INFO setup Manager starting up {"gitCommit": "44b3f99f3dd18eea1a45598be537e3a62c41314e", "gitBranch": "PR-2205-44b3f99f", "buildTime": "", "goVersion": "go1.25.6", "os": "linux", "arch": "amd64"} + create_namespace users-vault-7177 + local namespace=users-vault-7177 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print 
$1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces users-vault-7177' + xargs kubectl delete ns ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces users-vault-7177 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace users-vault-7177 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.dJEhUTTUdz + local LAST_OUT=/tmp/tmp.2wgDTUfJJ3 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.ywCRZvmbs2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.9kQbkXdKOa + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace users-vault-7177 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dJEhUTTUdz + cat /tmp/tmp.ywCRZvmbs2 + rm /tmp/tmp.dJEhUTTUdz /tmp/tmp.ywCRZvmbs2 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2wgDTUfJJ3 + cat /tmp/tmp.9kQbkXdKOa + rm /tmp/tmp.2wgDTUfJJ3 /tmp/tmp.9kQbkXdKOa + return 0 + kubectl_bin wait --for=delete namespace users-vault-7177 ++ mktemp + local LAST_OUT=/tmp/tmp.oYuP1e9oKq ++ mktemp + local LAST_ERR=/tmp/tmp.4l5VbbWTOV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace users-vault-7177 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oYuP1e9oKq + cat /tmp/tmp.4l5VbbWTOV + rm /tmp/tmp.oYuP1e9oKq /tmp/tmp.4l5VbbWTOV + return 0 + desc 'create namespace users-vault-7177' + set +o xtrace ----------------------------------------------------------------------------------- create namespace users-vault-7177 ----------------------------------------------------------------------------------- + kubectl_bin create namespace users-vault-7177 ++ mktemp + local LAST_OUT=/tmp/tmp.byCOD8kPby ++ mktemp + local 
LAST_ERR=/tmp/tmp.FdM4GKCTWZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace users-vault-7177 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.byCOD8kPby namespace/users-vault-7177 created + cat /tmp/tmp.FdM4GKCTWZ + rm /tmp/tmp.byCOD8kPby /tmp/tmp.FdM4GKCTWZ + return 0 + set_kube_ctx users-vault-7177 + local namespace=users-vault-7177 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.FWBw3UX5cZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.QMRmdjshT7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FWBw3UX5cZ ++ cat /tmp/tmp.QMRmdjshT7 ++ rm /tmp/tmp.FWBw3UX5cZ /tmp/tmp.QMRmdjshT7 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster2 --namespace=users-vault-7177 ++ mktemp + local LAST_OUT=/tmp/tmp.kfdKqksRLB ++ mktemp + local LAST_ERR=/tmp/tmp.DRTu2uPZj8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster2 --namespace=users-vault-7177 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kfdKqksRLB Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2205-44b3f99f-6-cluster2" modified. + cat /tmp/tmp.DRTu2uPZj8 + rm /tmp/tmp.kfdKqksRLB /tmp/tmp.DRTu2uPZj8 + return 0 + desc 'start client' + set +o xtrace ----------------------------------------------------------------------------------- start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.960BpSq1D0 ++ mktemp + local LAST_ERR=/tmp/tmp.x1Ormtwfth + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.960BpSq1D0 deployment.apps/psmdb-client created + cat /tmp/tmp.x1Ormtwfth + rm /tmp/tmp.960BpSq1D0 /tmp/tmp.x1Ormtwfth + return 0 + setup_vault + local sa_namespace=users-vault-7177 + '[' -n psmdb-operator ']' + sa_namespace=psmdb-operator + local tls_secret_name=vault-server-tls + create_tls_secret vault-server-tls users-vault-service + local name=vault-server-tls + local service_name=users-vault-service + local csr_name=vault-server-tls-csr-28701 + local tmp_dir ++ mktemp -d + tmp_dir=/tmp/tmp.omAYLSltx7 + openssl genrsa -out /tmp/tmp.omAYLSltx7/vault.key 2048 + cat + openssl req -new -key /tmp/tmp.omAYLSltx7/vault.key -subj '/CN=system:node:users-vault-service.users-vault-7177.svc;/O=system:nodes' -out /tmp/tmp.omAYLSltx7/server.csr -config /tmp/tmp.omAYLSltx7/csr.conf + cat ++ cat /tmp/tmp.omAYLSltx7/server.csr ++ base64 ++ tr -d '\n' + kubectl create -f /tmp/tmp.omAYLSltx7/csr.yaml certificatesigningrequest.certificates.k8s.io/vault-server-tls-csr-28701 created + sleep 10 + kubectl certificate approve vault-server-tls-csr-28701 certificatesigningrequest.certificates.k8s.io/vault-server-tls-csr-28701 approved + local serverCert ++ kubectl get csr vault-server-tls-csr-28701 -o 'jsonpath={.status.certificate}' + 
serverCert=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUU4VENDQTFtZ0F3SUJBZ0lRRG5JT0ZyMGp1cFJkN0Y4OXB0RnFnREFOQmdrcWhraUc5dzBCQVFzRkFEQXYKTVMwd0t3WURWUVFERXlSaE56TTRNVFV5WWkxbFlqVmtMVFEwWkdJdFlUTXpZUzFpWWpFd01tUXpOVFF6Tm1FdwpIaGNOTWpZd01USXhNVFF4T0RBeVdoY05NekV3TVRJd01UUXlNREF5V2pCWE1SVXdFd1lEVlFRS0V3eHplWE4wClpXMDZibTlrWlhNeFBqQThCZ05WQkFNTU5YTjVjM1JsYlRwdWIyUmxPblZ6WlhKekxYWmhkV3gwTFhObGNuWnAKWTJVdWRYTmxjbk10ZG1GMWJIUXROekUzTnk1emRtTTdNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QQpNSUlCQ2dLQ0FRRUFqczdkNjJaaHluRFhRYkQwNXI5T2xDcmo1RGpRWnZQUDI1L3dCN01nOWU0TnVXUXRVcytDCkxnaWQ5UVowWmcxS1JkUjBIVUg3VmxGUmkxZ2JYamVwVFlKZCtiSVhzdk5aMmhvVXpScnZGVUk0TWZncU92SkUKeDhDdWh1M0lKUWswTXNBZ2NJNi9iOVV6T2x6UE1CejZmVjZJNDgyRnkzSDJLZS9iYndobjdMUTE5NW5jcXlOZApQZERnZUlkUWU5WVFJaHlVdk5RTVV0NXdNM25xakU5MG5rOHlid1Y1MmxQVGo0NWhsL3JjZkVxR2lYNktvOGk1CmJRd09lRVIvbllkOXFKM3lzNmdKV2dPRkpvd2F0NlRzejFwQlBvaU5ycHVVN1hHSHdyZm43a1RMa0lnT1JzblQKQ1BBUWlKSStxREVURnBqaURialBrNXE5OHdiTGNYNm9EUUlEQVFBQm80SUJYekNDQVZzd0RnWURWUjBQQVFILwpCQVFEQWdXZ01CTUdBMVVkSlFRTU1Bb0dDQ3NHQVFVRkJ3TUJNQXdHQTFVZEV3RUIvd1FDTUFBd0h3WURWUjBqCkJCZ3dGb0FVNVN3cCtjenROVDliMWg0dG1vMHpjSFFQUjdVd2dnRURCZ05WSFJFRWdmc3dnZmlDRTNWelpYSnoKTFhaaGRXeDBMWE5sY25acFkyV0NGU291ZFhObGNuTXRkbUYxYkhRdGMyVnlkbWxqWllJbUtpNTFjMlZ5Y3kxMgpZWFZzZEMxelpYSjJhV05sTG5WelpYSnpMWFpoZFd4MExUY3hOemVDS2lvdWRYTmxjbk10ZG1GMWJIUXRjMlZ5CmRtbGpaUzUxYzJWeWN5MTJZWFZzZEMwM01UYzNMbk4yWTRJMmRYTmxjbk10ZG1GMWJIUXRjMlZ5ZG1salpTNTEKYzJWeWN5MTJZWFZzZEMwM01UYzNMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNnamdxTG5WelpYSnpMWFpoZFd4MApMWE5sY25acFkyVXVkWE5sY25NdGRtRjFiSFF0TnpFM055NXpkbU11WTJ4MWMzUmxjaTVzYjJOaGJJY0Vmd0FBCkFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FZRUFEbWFjWHJTZkRQNEVjU2FvdEVPNld4cmVIUTkwZWFxRkVnczEKTkViOHgzLzFkREFsckllVVVzQk1pWnBReTFnMlYvWGszQTRJWHNYQXNnQ1BjdzVVN3djcGJkaXNEeHpObXY4SgpETm5Md3ZDL2dvNXhiNEYrKzVUNXVIK0swT1RyZU5nelJpUis5Z0ZVVWErYjFyQTA2Y0k4WW5xRDR4RGhUR3ZNClZwbWtJK2IyblNibTFJTXNNd3hzTXRMWGdGeklSK3c0MnJBVnBEcjZPYmNYbm9wbUZyQm5IZldEaW5kVUFpaE4Ka0h3bUFpSVVaTi9meGQvT3hJY3ZsbUFuVkZrNllzZmlLcExKUERyYnUzaTN1OFFLcGtnc1MxRlhVZG9wcWhQLwpwd0RtUTdYejg0dVI5VnR6Sk8xNkdNU213T1cyZkNNaFRXNk5uOG9pYWJ3NWZKUWRTQStTSW5aRTBxL3hReEdYCmNUWXlXekh3YTJCZWxxUmNoSzdnNG5OY1M3WHhpYllRa05lcms0eDU3bmkxaFVDenVJUzRGeUQ0Uks4akJRdWYKdHNiNktoOFJFcElpTE96RklzSlpOazlFLzFpUFBIaS82aGdhSGdEb1BnNmN3ci85ajNscm42TzJMUjR4NnErOAoyb3luK0lTNmNtenVaNmxZNlIzbzlRMlRSc0d1Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + echo 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUU4VENDQTFtZ0F3SUJBZ0lRRG5JT0ZyMGp1cFJkN0Y4OXB0RnFnREFOQmdrcWhraUc5dzBCQVFzRkFEQXYKTVMwd0t3WURWUVFERXlSaE56TTRNVFV5WWkxbFlqVmtMVFEwWkdJdFlUTXpZUzFpWWpFd01tUXpOVFF6Tm1FdwpIaGNOTWpZd01USXhNVFF4T0RBeVdoY05NekV3TVRJd01UUXlNREF5V2pCWE1SVXdFd1lEVlFRS0V3eHplWE4wClpXMDZibTlrWlhNeFBqQThCZ05WQkFNTU5YTjVjM1JsYlRwdWIyUmxPblZ6WlhKekxYWmhkV3gwTFhObGNuWnAKWTJVdWRYTmxjbk10ZG1GMWJIUXROekUzTnk1emRtTTdNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QQpNSUlCQ2dLQ0FRRUFqczdkNjJaaHluRFhRYkQwNXI5T2xDcmo1RGpRWnZQUDI1L3dCN01nOWU0TnVXUXRVcytDCkxnaWQ5UVowWmcxS1JkUjBIVUg3VmxGUmkxZ2JYamVwVFlKZCtiSVhzdk5aMmhvVXpScnZGVUk0TWZncU92SkUKeDhDdWh1M0lKUWswTXNBZ2NJNi9iOVV6T2x6UE1CejZmVjZJNDgyRnkzSDJLZS9iYndobjdMUTE5NW5jcXlOZApQZERnZUlkUWU5WVFJaHlVdk5RTVV0NXdNM25xakU5MG5rOHlid1Y1MmxQVGo0NWhsL3JjZkVxR2lYNktvOGk1CmJRd09lRVIvbllkOXFKM3lzNmdKV2dPRkpvd2F0NlRzejFwQlBvaU5ycHVVN1hHSHdyZm43a1RMa0lnT1JzblQKQ1BBUWlKSStxREVURnBqaURialBrNXE5OHdiTGNYNm9EUUlEQVFBQm80SUJYekNDQVZzd0RnWURWUjBQQVFILwpCQVFEQWdXZ01CTUdBMVVkSlFRTU1Bb0dDQ3NHQVFVRkJ3TUJNQXdHQTFVZEV3RUIvd1FDTUFBd0h3WURWUjBqCkJCZ3dGb0FVNVN3cCtjenROVDliMWg0dG1vMHpjSFFQUjdVd2dnRURCZ05WSFJFRWdmc3dnZmlDRTNWelpYSnoKTFhaaGRXeDBMWE5sY25acFkyV0NGU291ZFhObGNuTXRkbUYxYkhRdGMyVnlkbWxqWllJbUtpNTFjMlZ5Y3kxMgpZWFZzZEMxelpYSjJhV05sTG5WelpYSnpMWFpoZFd4MExUY3hOemVDS2lvdWRYTmxjbk10ZG1GMWJIUXRjMlZ5CmRtbGpaUzUxYzJWeWN5MTJZWFZzZEMwM01UYzNMbk4yWTRJMmRYTmxjbk10ZG1GMWJIUXRjMlZ5ZG1salpTNTEKYzJWeWN5MTJZWFZzZEMwM01UYzNMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNnamdxTG5WelpYSnpMWFpoZFd4MApMWE5sY25acFkyVXVkWE5sY25NdGRtRjFiSFF0TnpFM055NXpkbU11WTJ4MWMzUmxjaTVzYjJOaGJJY0Vmd0FBCkFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FZRUFEbWFjWHJTZkRQNEVjU2FvdEVPNld4cmVIUTkwZWFxRkVnczEKTkViOHgzLzFkREFsckllVVVzQk1pWnBReTFnMlYvWGszQTRJWHNYQXNnQ1BjdzVVN3djcGJkaXNEeHpObXY4SgpETm5Md3ZDL2dvNXhiNEYrKzVUNXVIK0swT1RyZU5nelJpUis5Z0ZVVWErYjFyQTA2Y0k4WW5xRDR4RGhUR3ZNClZwbWtJK2IyblNibTFJTXNNd3hzTXRMWGdGeklSK3c0MnJBVnBEcjZPYmNYbm9wbUZyQm5IZldEaW5kVUFpaE4Ka0h3bUFpSVVaTi9meGQvT3hJY3ZsbUFuVkZrNllzZmlLcExKUERyYnUzaTN1OFFLcGtnc1MxRlhVZG9wcWhQLwpwd0RtUTdYejg0dVI5VnR6Sk8xNkdNU213T1cyZkNNaFRXNk5uOG9pYWJ3NWZKUWRTQStTSW5aRTBxL3hReEdYCmNUWXlXekh3YTJCZWxxUmNoSzdnNG5OY1M3WHhpYllRa05lcms0eDU3bmkxaFVDenVJUzRGeUQ0Uks4akJRdWYKdHNiNktoOFJFcElpTE96RklzSlpOazlFLzFpUFBIaS82aGdhSGdEb1BnNmN3ci85ajNscm42TzJMUjR4NnErOAoyb3luK0lTNmNtenVaNmxZNlIzbzlRMlRSc0d1Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + openssl base64 -d -A -out /tmp/tmp.omAYLSltx7/vault.crt + kubectl get cm kube-root-ca.crt -o 'jsonpath={['\''data'\'']['\''ca\.crt'\'']}' + kubectl create secret generic vault-server-tls --from-file=tls.key=/tmp/tmp.omAYLSltx7/vault.key --from-file=tls.crt=/tmp/tmp.omAYLSltx7/vault.crt --from-file=ca.crt=/tmp/tmp.omAYLSltx7/vault.ca secret/vault-server-tls created + rm -rf /tmp/tmp.omAYLSltx7 + deploy_vault users-vault-service --set global.enabled=true --set global.tlsDisable=false --set server.extraEnvironmentVars.VAULT_CACERT=/vault/userconfig/vault-server-tls/ca.crt --set 'server.volumes[0].name=userconfig-vault-server-tls' --set 'server.volumes[0].secret.defaultMode=420' --set 'server.volumes[0].secret.secretName=vault-server-tls' --set 'server.volumeMounts[0].mountPath=/vault/userconfig/vault-server-tls' --set 'server.volumeMounts[0].name=userconfig-vault-server-tls' --set 'server.volumeMounts[0].readOnly=true' --set server.standalone.enabled=true --set-string 'server.standalone.config=listener "tcp" { address = "[::]:8200" cluster_address = "[::]:8201" tls_cert_file = "/vault/userconfig/vault-server-tls/tls.crt" tls_key_file = 
"/vault/userconfig/vault-server-tls/tls.key" tls_client_ca_file = "/vault/userconfig/vault-server-tls/ca.crt" } storage "file" { path = "/vault/data" }' + local name=users-vault-service + [[ 23 -gt 0 ]] + shift + desc 'install Vault users-vault-service' + set +o xtrace ----------------------------------------------------------------------------------- install Vault users-vault-service ----------------------------------------------------------------------------------- + helm uninstall users-vault-service Error: uninstall: Release not loaded: users-vault-service: release: not found + : + helm repo remove hashicorp "hashicorp" has been removed from your repositories + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" has been added to your repositories + destroy_vault users-vault-service + local name=users-vault-service + local vault_ns ++ helm list --all-namespaces --filter users-vault-service ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + vault_ns= + desc 'destroy vault' + set +o xtrace ----------------------------------------------------------------------------------- destroy vault ----------------------------------------------------------------------------------- ++ kubectl api-resources ++ grep vault ++ awk '{print $1}' + '[' -n '' ']' ++ kubectl get clusterrolebinding -l app.kubernetes.io/instance=users-vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrolebinding users-vault-service-agent-injector-binding users-vault-service-server-binding clusterrolebinding.rbac.authorization.k8s.io "users-vault-service-agent-injector-binding" deleted clusterrolebinding.rbac.authorization.k8s.io "users-vault-service-server-binding" deleted ++ kubectl get clusterrole -l app.kubernetes.io/instance=users-vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrole users-vault-service-agent-injector-clusterrole clusterrole.rbac.authorization.k8s.io "users-vault-service-agent-injector-clusterrole" deleted ++ kubectl get mutatingwebhookconfiguration -l app.kubernetes.io/instance=users-vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete mutatingwebhookconfiguration users-vault-service-agent-injector-cfg mutatingwebhookconfiguration.admissionregistration.k8s.io "users-vault-service-agent-injector-cfg" deleted + [[ -n '' ]] + retry 10 60 helm install users-vault-service hashicorp/vault --disable-openapi-validation --set dataStorage.enabled=false --set global.enabled=true --set global.tlsDisable=false --set server.extraEnvironmentVars.VAULT_CACERT=/vault/userconfig/vault-server-tls/ca.crt --set 'server.volumes[0].name=userconfig-vault-server-tls' --set 'server.volumes[0].secret.defaultMode=420' --set 'server.volumes[0].secret.secretName=vault-server-tls' --set 'server.volumeMounts[0].mountPath=/vault/userconfig/vault-server-tls' --set 'server.volumeMounts[0].name=userconfig-vault-server-tls' --set 'server.volumeMounts[0].readOnly=true' --set server.standalone.enabled=true --set-string 'server.standalone.config=listener "tcp" { address = "[::]:8200" cluster_address = "[::]:8201" tls_cert_file = "/vault/userconfig/vault-server-tls/tls.crt" tls_key_file = "/vault/userconfig/vault-server-tls/tls.key" tls_client_ca_file = "/vault/userconfig/vault-server-tls/ca.crt" } storage "file" { path = "/vault/data" }' + local max=10 + local delay=60 + shift 2 + local n=1 + helm install users-vault-service hashicorp/vault 
--disable-openapi-validation --set dataStorage.enabled=false --set global.enabled=true --set global.tlsDisable=false --set server.extraEnvironmentVars.VAULT_CACERT=/vault/userconfig/vault-server-tls/ca.crt --set 'server.volumes[0].name=userconfig-vault-server-tls' --set 'server.volumes[0].secret.defaultMode=420' --set 'server.volumes[0].secret.secretName=vault-server-tls' --set 'server.volumeMounts[0].mountPath=/vault/userconfig/vault-server-tls' --set 'server.volumeMounts[0].name=userconfig-vault-server-tls' --set 'server.volumeMounts[0].readOnly=true' --set server.standalone.enabled=true --set-string 'server.standalone.config=listener "tcp" { address = "[::]:8200" cluster_address = "[::]:8201" tls_cert_file = "/vault/userconfig/vault-server-tls/tls.crt" tls_key_file = "/vault/userconfig/vault-server-tls/tls.key" tls_client_ca_file = "/vault/userconfig/vault-server-tls/ca.crt" } storage "file" { path = "/vault/data" }' NAME: users-vault-service LAST DEPLOYED: Wed Jan 21 14:20:15 2026 NAMESPACE: users-vault-7177 STATUS: deployed REVISION: 1 NOTES: Thank you for installing HashiCorp Vault! Now that you have deployed Vault, you should look over the docs on using Vault with Kubernetes available here: https://developer.hashicorp.com/vault/docs Your release is named users-vault-service. To learn more about the release, try: $ helm status users-vault-service $ helm get manifest users-vault-service + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + grep Running + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/users-vault-service-0 -o 'jsonpath={.status.phase}' + grep Running Running + sleep 5 + kubectl_bin exec pod/users-vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json ++ mktemp + local LAST_OUT=/tmp/tmp.6hKkgHg6fw ++ mktemp + local LAST_ERR=/tmp/tmp.VE2OKzlV6b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec pod/users-vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6hKkgHg6fw + cat /tmp/tmp.VE2OKzlV6b + rm /tmp/tmp.6hKkgHg6fw /tmp/tmp.VE2OKzlV6b + return 0 + local unsealKey ++ jq -r '.unseal_keys_b64[]' + unsealKey=Tf4HTezr0rIK7Tk/MbtMOaRz/fS7vXvBgNPZUFILbLI= + local token ++ jq -r .root_token + token=hvs.aobYnvFmS3TOI0LvB9sO9b14 + kubectl_bin exec pod/users-vault-service-0 -- vault operator unseal Tf4HTezr0rIK7Tk/MbtMOaRz/fS7vXvBgNPZUFILbLI= ++ mktemp + local LAST_OUT=/tmp/tmp.s4avV8DI9X ++ mktemp + local LAST_ERR=/tmp/tmp.3xgIRoxxZ3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl 
exec pod/users-vault-service-0 -- vault operator unseal Tf4HTezr0rIK7Tk/MbtMOaRz/fS7vXvBgNPZUFILbLI= + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s4avV8DI9X Key Value --- ----- Seal Type shamir Initialized true Sealed false Total Shares 1 Threshold 1 Version 1.21.2 Build Date 2026-01-06T08:33:05Z Storage Type file Cluster Name vault-cluster-c6eb11b9 Cluster ID e6245949-3a77-6f04-dc6d-dd2a783a6d18 HA Enabled false + cat /tmp/tmp.3xgIRoxxZ3 + rm /tmp/tmp.s4avV8DI9X /tmp/tmp.3xgIRoxxZ3 + return 0 + kubectl_bin exec -it pod/users-vault-service-0 -- sh ++ mktemp + local LAST_OUT=/tmp/tmp.jFHQWKJCp6 ++ mktemp + local LAST_ERR=/tmp/tmp.n78NfVSdrw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -it pod/users-vault-service-0 -- sh + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jFHQWKJCp6 Success! You are now authenticated. The token information displayed below is already stored in the token helper. You do NOT need to run "vault login" again. Future Vault requests will automatically use this token. Key Value --- ----- token hvs.aobYnvFmS3TOI0LvB9sO9b14 token_accessor iuwbonBwSr0BzkW3Dovc1vsd token_duration ∞ token_renewable false token_policies ["root"] identity_policies [] policies ["root"] Success! Enabled the kv-v2 secrets engine at: secret/ + cat /tmp/tmp.n78NfVSdrw Unable to use a TTY - input is not a terminal or the right kind of file + rm /tmp/tmp.jFHQWKJCp6 /tmp/tmp.n78NfVSdrw + return 0 + kubectl_bin create secret generic vault-secret --from-literal=token=hvs.aobYnvFmS3TOI0LvB9sO9b14 ++ mktemp + local LAST_OUT=/tmp/tmp.SBwVN9iqCx ++ mktemp + local LAST_ERR=/tmp/tmp.YbE7adXyGH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create secret generic vault-secret --from-literal=token=hvs.aobYnvFmS3TOI0LvB9sO9b14 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SBwVN9iqCx secret/vault-secret created + cat /tmp/tmp.YbE7adXyGH + rm /tmp/tmp.SBwVN9iqCx /tmp/tmp.YbE7adXyGH + return 0 + sleep 10 + wait_pod users-vault-service-0 + local pod=users-vault-service-0 + set +o xtrace waiting for pod/users-vault-service-0 to be ready.OK + sleep 20 + kubectl_bin exec users-vault-service-0 -- vault auth enable kubernetes ++ mktemp + local LAST_OUT=/tmp/tmp.sogzbFvxUu ++ mktemp + local LAST_ERR=/tmp/tmp.zVsXjogfCY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- vault auth enable kubernetes + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sogzbFvxUu Success! 
Enabled kubernetes auth method at: kubernetes/ + cat /tmp/tmp.zVsXjogfCY + rm /tmp/tmp.sogzbFvxUu /tmp/tmp.zVsXjogfCY + return 0 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/users-vault/conf/role-binding.yml + yq '.metadata.namespace="users-vault-7177"' + yq '.subjects[0].namespace="psmdb-operator"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PawXJhYvyP ++ mktemp + local LAST_ERR=/tmp/tmp.oC6A96b1CC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PawXJhYvyP rolebinding.rbac.authorization.k8s.io/vault-role-binding created + cat /tmp/tmp.oC6A96b1CC + rm /tmp/tmp.PawXJhYvyP /tmp/tmp.oC6A96b1CC + return 0 ++ kubectl_bin exec users-vault-service-0 -- vault token create -policy=operator -format=json ++ jq -r .auth.client_token +++ mktemp ++ local LAST_OUT=/tmp/tmp.XClnBPLQHY +++ mktemp ++ local LAST_ERR=/tmp/tmp.xCQUIkvo5p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec users-vault-service-0 -- vault token create -policy=operator -format=json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XClnBPLQHY ++ cat /tmp/tmp.xCQUIkvo5p ++ rm /tmp/tmp.XClnBPLQHY /tmp/tmp.xCQUIkvo5p ++ return 0 + token=hvs.CAESIP_eC-9uir-mpnmmzmoXBMPsGtNQjZEt2CdcJEOZqFkbGh4KHGh2cy5Zb0k1WGQ3RnVjeGNwWXZIaHQ5bk9Od0E + kubectl_bin create secret generic vault-sync-secret --from-literal=token=hvs.CAESIP_eC-9uir-mpnmmzmoXBMPsGtNQjZEt2CdcJEOZqFkbGh4KHGh2cy5Zb0k1WGQ3RnVjeGNwWXZIaHQ5bk9Od0E ++ mktemp + local LAST_OUT=/tmp/tmp.IRcdBUdg7N ++ mktemp + local LAST_ERR=/tmp/tmp.dhkcLJCL0v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create secret generic vault-sync-secret --from-literal=token=hvs.CAESIP_eC-9uir-mpnmmzmoXBMPsGtNQjZEt2CdcJEOZqFkbGh4KHGh2cy5Zb0k1WGQ3RnVjeGNwWXZIaHQ5bk9Od0E + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IRcdBUdg7N secret/vault-sync-secret created + cat /tmp/tmp.dhkcLJCL0v + rm /tmp/tmp.IRcdBUdg7N /tmp/tmp.dhkcLJCL0v + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault write auth/kubernetes/config kubernetes_host=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT' ++ mktemp + local LAST_OUT=/tmp/tmp.odUvTviVOw ++ mktemp + local LAST_ERR=/tmp/tmp.v4SqUEWMV0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault write auth/kubernetes/config kubernetes_host=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.odUvTviVOw Success! 
Data written to: auth/kubernetes/config + cat /tmp/tmp.v4SqUEWMV0 + rm /tmp/tmp.odUvTviVOw /tmp/tmp.v4SqUEWMV0 + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault policy write operator - < deletion_time n/a destroyed false version 1 + cat /tmp/tmp.voF1DO9dkf + rm /tmp/tmp.cMlyuFuoq9 /tmp/tmp.voF1DO9dkf + return 0 + rm -f /tmp/tmp.pLdV0z3gRt /tmp/tmp.m4X3l00cYT + desc 'create first PSMDB cluster some-name-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/users-vault/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/users-vault/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/e2e-tests/users-vault/conf/some-name-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2205-44b3f99f"' + local LAST_OUT=/tmp/tmp.q2JSL9o6uU + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + /usr/sbin/sed -e s/NAME_SPACE/users-vault-7177/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.vPCuhmO489 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q2JSL9o6uU perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.vPCuhmO489 + rm /tmp/tmp.q2JSL9o6uU /tmp/tmp.vPCuhmO489 + return 0 + desc 'Check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- Check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..................OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3SVxu8oyWV +++ mktemp ++ local LAST_ERR=/tmp/tmp.g1xYu3ufpT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3SVxu8oyWV ++ cat /tmp/tmp.g1xYu3ufpT ++ rm /tmp/tmp.3SVxu8oyWV /tmp/tmp.g1xYu3ufpT ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready............OK 
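The credential checks that follow (check_mongo_auth) all reduce to one probe: run db.runCommand({ ping: 1 }).ok through the psmdb-client pod with the given user's URI and require it to print 1. A minimal sketch of that check under the same assumptions visible in the trace below (helper name and output filtering simplified):

check_mongo_auth() {
    local uri="$1"    # e.g. userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177
    local client ping
    client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    ping=$(kubectl exec "$client" -- bash -c "printf 'db.runCommand({ ping: 1 }).ok\n' | mongo 'mongodb://${uri}.svc.cluster.local/admin?ssl=false&replicaSet=rs0' --quiet")
    [ "$ping" = "1" ]    # authentication (and the ping) must succeed
}

The same probe is repeated for rs0-0, rs0-1 and rs0-2, first with the original userAdmin credentials and again after the user is renamed through Vault.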
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zFy5QhbLKd +++ mktemp ++ local LAST_ERR=/tmp/tmp.hnrQ0dQLiz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zFy5QhbLKd ++ cat /tmp/tmp.hnrQ0dQLiz ++ rm /tmp/tmp.zFy5QhbLKd /tmp/tmp.hnrQ0dQLiz ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gLWJzx15xt +++ mktemp ++ local LAST_ERR=/tmp/tmp.fR2n83dZJ4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gLWJzx15xt ++ cat /tmp/tmp.fR2n83dZJ4 ++ rm /tmp/tmp.gLWJzx15xt /tmp/tmp.fR2n83dZJ4 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'check MONGODB_USER_ADMIN_PASSWORD' + set +o xtrace ----------------------------------------------------------------------------------- check MONGODB_USER_ADMIN_PASSWORD ----------------------------------------------------------------------------------- ++ getUserData some-users MONGODB_USER_ADMIN_USER ++ local secretName=some-users ++ local dataKey=MONGODB_USER_ADMIN_USER +++ getSecretData some-users MONGODB_USER_ADMIN_USER +++ local secretName=some-users +++ local dataKey=MONGODB_USER_ADMIN_USER ++++ kubectl get secrets/some-users '--template={{.data.MONGODB_USER_ADMIN_USER}}' ++++ base64 -d +++ local data=userAdmin +++ echo userAdmin ++ urlencode userAdmin ++ uri=userAdmin ++ echo -n userAdmin ++ jq -s -R -r @uri + user=userAdmin + check_mongo_auth userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local uri=userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.R3vWk6U54m ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5EJhxHfPsz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.R3vWk6U54m +++ cat /tmp/tmp.5EJhxHfPsz +++ rm /tmp/tmp.R3vWk6U54m /tmp/tmp.5EJhxHfPsz +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 
}).ok\n'\'' | mongo mongodb://userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zOoO2C0xhj +++ mktemp ++ local LAST_ERR=/tmp/tmp.8TNX1kPIGA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://userAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zOoO2C0xhj ++ cat /tmp/tmp.8TNX1kPIGA ++ rm /tmp/tmp.zOoO2C0xhj /tmp/tmp.8TNX1kPIGA ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth userAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=userAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' userAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=userAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ userAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TtaZo3E2Il ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nfGb1xzGJi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TtaZo3E2Il +++ cat /tmp/tmp.nfGb1xzGJi +++ rm /tmp/tmp.TtaZo3E2Il /tmp/tmp.nfGb1xzGJi +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://userAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w5Hl2EBfTh +++ mktemp ++ local LAST_ERR=/tmp/tmp.I0r3hI5koi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://userAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w5Hl2EBfTh ++ cat /tmp/tmp.I0r3hI5koi ++ rm /tmp/tmp.w5Hl2EBfTh /tmp/tmp.I0r3hI5koi ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return 
----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth userAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=userAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' userAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=userAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ userAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ymlLFZYK7x ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kTWwzb2awQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ymlLFZYK7x +++ cat /tmp/tmp.kTWwzb2awQ +++ rm /tmp/tmp.ymlLFZYK7x /tmp/tmp.kTWwzb2awQ +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://userAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L6gMg46aaq +++ mktemp ++ local LAST_ERR=/tmp/tmp.IASGccNkRX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://userAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L6gMg46aaq ++ cat /tmp/tmp.IASGccNkRX ++ rm /tmp/tmp.L6gMg46aaq /tmp/tmp.IASGccNkRX ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'change MONGODB_USER_ADMIN_USER' + set +o xtrace ----------------------------------------------------------------------------------- change MONGODB_USER_ADMIN_USER ----------------------------------------------------------------------------------- + newname=someUserAdminUser + vault_append MONGODB_USER_ADMIN_USER someUserAdminUser + local key=MONGODB_USER_ADMIN_USER + local value=someUserAdminUser + local tmp_json ++ mktemp + tmp_json=/tmp/tmp.3YtsFCKH1l + local new_tmp_json ++ mktemp + new_tmp_json=/tmp/tmp.LFSNJjSkFf + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + jq .data.data ++ mktemp + local LAST_OUT=/tmp/tmp.DzuuF6iQJS ++ mktemp + local LAST_ERR=/tmp/tmp.HxlIc7tBLI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl 
exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DzuuF6iQJS + cat /tmp/tmp.HxlIc7tBLI + rm /tmp/tmp.DzuuF6iQJS /tmp/tmp.HxlIc7tBLI + return 0 + '[' '!' -s /tmp/tmp.3YtsFCKH1l ']' + jq --arg key MONGODB_USER_ADMIN_USER --arg value someUserAdminUser '(. // {}) + {($key): $value}' /tmp/tmp.3YtsFCKH1l + kubectl_bin cp /tmp/tmp.LFSNJjSkFf users-vault-service-0:/tmp/data_new.json ++ mktemp + local LAST_OUT=/tmp/tmp.VzDnTF8gvm ++ mktemp + local LAST_ERR=/tmp/tmp.h1U0V0Qjzy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /tmp/tmp.LFSNJjSkFf users-vault-service-0:/tmp/data_new.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VzDnTF8gvm + cat /tmp/tmp.h1U0V0Qjzy + rm /tmp/tmp.VzDnTF8gvm /tmp/tmp.h1U0V0Qjzy + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' ++ mktemp + local LAST_OUT=/tmp/tmp.2o6ENA7b8S ++ mktemp + local LAST_ERR=/tmp/tmp.Ry7bFbwH4q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2o6ENA7b8S ======================= Secret Path ======================= secret/data/psmdb/operator/users-vault-7177/some-name/users ======= Metadata ======= Key Value --- ----- created_time 2026-01-21T14:23:53.721062609Z custom_metadata deletion_time n/a destroyed false version 2 + cat /tmp/tmp.Ry7bFbwH4q + rm /tmp/tmp.2o6ENA7b8S /tmp/tmp.Ry7bFbwH4q + return 0 + rm -f /tmp/tmp.3YtsFCKH1l /tmp/tmp.LFSNJjSkFf + sleep 25 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oCjPeSnCu4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yyb2xJiOZR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oCjPeSnCu4 ++ cat /tmp/tmp.Yyb2xJiOZR ++ rm /tmp/tmp.oCjPeSnCu4 /tmp/tmp.Yyb2xJiOZR ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + sleep 15 + check_mongo_auth someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local uri=someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods 
--selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rgmpWAvgHx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kSSXvxqWZl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rgmpWAvgHx +++ cat /tmp/tmp.kSSXvxqWZl +++ rm /tmp/tmp.rgmpWAvgHx /tmp/tmp.kSSXvxqWZl +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2exTFCYGHe +++ mktemp ++ local LAST_ERR=/tmp/tmp.riEFC8izzV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2exTFCYGHe ++ cat /tmp/tmp.riEFC8izzV ++ rm /tmp/tmp.2exTFCYGHe /tmp/tmp.riEFC8izzV ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tOz8MnSfZC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BUKZQZurUf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.tOz8MnSfZC +++ cat /tmp/tmp.BUKZQZurUf +++ rm /tmp/tmp.tOz8MnSfZC /tmp/tmp.BUKZQZurUf +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cZuYTZwRdb +++ mktemp ++ 
local LAST_ERR=/tmp/tmp.TNtHsGYu7Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cZuYTZwRdb ++ cat /tmp/tmp.TNtHsGYu7Q ++ rm /tmp/tmp.cZuYTZwRdb /tmp/tmp.TNtHsGYu7Q ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local uri=someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g3l0oag1P2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HJe46UPKKg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.g3l0oag1P2 +++ cat /tmp/tmp.HJe46UPKKg +++ rm /tmp/tmp.g3l0oag1P2 /tmp/tmp.HJe46UPKKg +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TsXkcIwOao +++ mktemp ++ local LAST_ERR=/tmp/tmp.LB1Ceoj9rQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TsXkcIwOao ++ cat /tmp/tmp.LB1Ceoj9rQ ++ rm /tmp/tmp.TsXkcIwOao /tmp/tmp.LB1Ceoj9rQ ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'removing tokenSecret to check kubernetes auth' + set +o xtrace 
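Note on the pattern above: every one of these auth checks goes through the suite's check_mongo_auth/run_mongo helpers. The ping command is piped into a mongo shell inside the psmdb-client pod, mongod log noise is filtered with grep -E -v, and the step only passes if db.runCommand({ ping: 1 }).ok comes back as 1. A minimal sketch of that pattern, reconstructed from the trace (the kubectl_bin retry and tempfile bookkeeping is dropped, the namespace and pod label are the ones used in this run, and the variable names are illustrative):

    check_mongo_auth() {
        local uri="$1"    # e.g. someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177
        local client ping
        # the test talks to mongod through a throw-away client pod, not directly
        client=$(kubectl get pods --selector=name=psmdb-client \
            -o jsonpath='{.items[].metadata.name}')
        ping=$(kubectl exec "$client" -- bash -c \
            "printf 'db.runCommand({ ping: 1 }).ok\n' | \
             mongo 'mongodb://${uri}.svc.cluster.local/admin?ssl=false&replicaSet=rs0' --quiet" \
            | grep -E -v 'I NETWORK|W NETWORK|Error saving history file|connecting to:|Implicit session:')
        [ "$ping" = "1" ]    # the trace's '[' 1 '!=' 1 ']' check, with the sense inverted
    }

The 'removing tokenSecret' banner that follows switches the test to Vault's Kubernetes auth path: spec.vault.syncUsers.tokenSecret is dropped with a JSON-patch remove op and the vault-sync-secret Secret is deleted, so later reconciliations presumably authenticate to Vault without the static token.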
----------------------------------------------------------------------------------- removing tokenSecret to check kubernetes auth ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op":"remove","path":"/spec/vault/syncUsers/tokenSecret"}]' ++ mktemp + local LAST_OUT=/tmp/tmp.EXU5QsvwJg ++ mktemp + local LAST_ERR=/tmp/tmp.T76SeMjiQr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op":"remove","path":"/spec/vault/syncUsers/tokenSecret"}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EXU5QsvwJg perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.T76SeMjiQr + rm /tmp/tmp.EXU5QsvwJg /tmp/tmp.T76SeMjiQr + return 0 + kubectl_bin delete secret vault-sync-secret ++ mktemp + local LAST_OUT=/tmp/tmp.nc54lPOr7L ++ mktemp + local LAST_ERR=/tmp/tmp.pv8HdhW30Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete secret vault-sync-secret + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nc54lPOr7L secret "vault-sync-secret" deleted from users-vault-7177 namespace + cat /tmp/tmp.pv8HdhW30Y + rm /tmp/tmp.nc54lPOr7L /tmp/tmp.pv8HdhW30Y + return 0 + desc 'change MONGODB_DATABASE_ADMIN_PASSWORD' + set +o xtrace ----------------------------------------------------------------------------------- change MONGODB_DATABASE_ADMIN_PASSWORD ----------------------------------------------------------------------------------- + vault_append MONGODB_DATABASE_ADMIN_PASSWORD test-password + local key=MONGODB_DATABASE_ADMIN_PASSWORD + local value=test-password + local tmp_json ++ mktemp + tmp_json=/tmp/tmp.L9xosG8uL2 + local new_tmp_json ++ mktemp + new_tmp_json=/tmp/tmp.CYLNVxXzQQ + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + jq .data.data ++ mktemp + local LAST_OUT=/tmp/tmp.L9HCaYr4GK ++ mktemp + local LAST_ERR=/tmp/tmp.soYLa4AmDU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L9HCaYr4GK + cat /tmp/tmp.soYLa4AmDU + rm /tmp/tmp.L9HCaYr4GK /tmp/tmp.soYLa4AmDU + return 0 + '[' '!' -s /tmp/tmp.L9xosG8uL2 ']' + jq --arg key MONGODB_DATABASE_ADMIN_PASSWORD --arg value test-password '(. 
// {}) + {($key): $value}' /tmp/tmp.L9xosG8uL2 + kubectl_bin cp /tmp/tmp.CYLNVxXzQQ users-vault-service-0:/tmp/data_new.json ++ mktemp + local LAST_OUT=/tmp/tmp.qCv4tsmzWN ++ mktemp + local LAST_ERR=/tmp/tmp.yz7fv80Nv8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /tmp/tmp.CYLNVxXzQQ users-vault-service-0:/tmp/data_new.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qCv4tsmzWN + cat /tmp/tmp.yz7fv80Nv8 + rm /tmp/tmp.qCv4tsmzWN /tmp/tmp.yz7fv80Nv8 + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' ++ mktemp + local LAST_OUT=/tmp/tmp.G5FFBbcZha ++ mktemp + local LAST_ERR=/tmp/tmp.8TQ6XhoMWP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G5FFBbcZha ======================= Secret Path ======================= secret/data/psmdb/operator/users-vault-7177/some-name/users ======= Metadata ======= Key Value --- ----- created_time 2026-01-21T14:24:56.084482086Z custom_metadata deletion_time n/a destroyed false version 3 + cat /tmp/tmp.8TQ6XhoMWP + rm /tmp/tmp.G5FFBbcZha /tmp/tmp.8TQ6XhoMWP + return 0 + rm -f /tmp/tmp.L9xosG8uL2 /tmp/tmp.CYLNVxXzQQ + sleep 25 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YDUwMxACUv +++ mktemp ++ local LAST_ERR=/tmp/tmp.goQY8pORQL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YDUwMxACUv ++ cat /tmp/tmp.goQY8pORQL ++ rm /tmp/tmp.YDUwMxACUv /tmp/tmp.goQY8pORQL ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + sleep 15 ++ getUserData some-users MONGODB_DATABASE_ADMIN_USER ++ local secretName=some-users ++ local dataKey=MONGODB_DATABASE_ADMIN_USER +++ getSecretData some-users MONGODB_DATABASE_ADMIN_USER +++ local secretName=some-users +++ local dataKey=MONGODB_DATABASE_ADMIN_USER ++++ kubectl get secrets/some-users '--template={{.data.MONGODB_DATABASE_ADMIN_USER}}' ++++ base64 -d +++ local data=databaseAdmin +++ echo databaseAdmin ++ urlencode databaseAdmin ++ uri=databaseAdmin ++ echo -n databaseAdmin ++ jq -s -R -r @uri + user=databaseAdmin + check_mongo_auth databaseAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local uri=databaseAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' databaseAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=databaseAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ 
databaseAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.miC3gGdiEQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.irRcB2gGds +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.miC3gGdiEQ +++ cat /tmp/tmp.irRcB2gGds +++ rm /tmp/tmp.miC3gGdiEQ /tmp/tmp.irRcB2gGds +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://databaseAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3hj1DpuACc +++ mktemp ++ local LAST_ERR=/tmp/tmp.vT4O9GLSg5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://databaseAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3hj1DpuACc ++ cat /tmp/tmp.vT4O9GLSg5 ++ rm /tmp/tmp.3hj1DpuACc /tmp/tmp.vT4O9GLSg5 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth databaseAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=databaseAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' databaseAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=databaseAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ databaseAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Gdc70UVOfq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QDUxPn9Kj0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Gdc70UVOfq +++ cat /tmp/tmp.QDUxPn9Kj0 +++ rm /tmp/tmp.Gdc70UVOfq /tmp/tmp.QDUxPn9Kj0 +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo 
mongodb://databaseAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pnLoWqhT8j +++ mktemp ++ local LAST_ERR=/tmp/tmp.3pWDOWdVC0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://databaseAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pnLoWqhT8j ++ cat /tmp/tmp.3pWDOWdVC0 ++ rm /tmp/tmp.pnLoWqhT8j /tmp/tmp.3pWDOWdVC0 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth databaseAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=databaseAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' databaseAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=databaseAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ databaseAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TEOMpr6IsE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KyERCZLmLY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TEOMpr6IsE +++ cat /tmp/tmp.KyERCZLmLY +++ rm /tmp/tmp.TEOMpr6IsE /tmp/tmp.KyERCZLmLY +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://databaseAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZbzZpIGFR2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GCAbT9uWc7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://databaseAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZbzZpIGFR2 ++ cat /tmp/tmp.GCAbT9uWc7 ++ rm /tmp/tmp.ZbzZpIGFR2 /tmp/tmp.GCAbT9uWc7 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return 
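Each 'change MONGODB_*' block in this log is the same read-modify-write cycle: vault_append merges one key into the users document in Vault's KV v2 engine, the test sleeps, wait_cluster_consistency polls the psmdb resource until it reports ready, and the (possibly renamed) user is then looked up and checked. A condensed sketch of those helpers as they appear in the trace; the mount, secret path, pod and Secret names are the ones from this run, while the loop structure, temp-file names and return handling are simplifications:

    # vault_append: merge one key into the users secret stored in Vault (KV v2)
    vault_append() {
        local key="$1" value="$2"
        local cur new
        cur=$(mktemp); new=$(mktemp)
        # KV v2 wraps the payload, so only .data.data is kept
        kubectl exec users-vault-service-0 -- sh -c \
            'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' \
            | jq .data.data > "$cur"
        [ -s "$cur" ] || return 1    # bail out if the read came back empty
        # add or overwrite the requested key, then write the whole document back
        jq --arg key "$key" --arg value "$value" '(. // {}) + {($key): $value}' "$cur" > "$new"
        kubectl cp "$new" users-vault-service-0:/tmp/data_new.json
        kubectl exec users-vault-service-0 -- sh -c \
            'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"'
        rm -f "$cur" "$new"
    }

    # wait_cluster_consistency: poll the custom resource until it is ready again
    wait_cluster_consistency() {
        until [ "$(kubectl get psmdb "$1" -o jsonpath='{.status.state}')" = "ready" ]; do
            sleep 7
        done
    }

    # getUserData: read a field from the some-users Secret and URL-encode it
    getUserData() {
        local data
        data=$(kubectl get "secrets/$1" --template="{{.data.$2}}" | base64 -d)
        printf '%s' "$data" | jq -s -R -r @uri
    }

Each vault kv put bumps the KV version (2, 3, 4, and so on in the Metadata tables printed above), which is the easiest way to confirm from the log that the secret really changed between steps.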
----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'change MONGODB_BACKUP_PASSWORD' + set +o xtrace ----------------------------------------------------------------------------------- change MONGODB_BACKUP_PASSWORD ----------------------------------------------------------------------------------- + vault_append MONGODB_BACKUP_PASSWORD test-password + local key=MONGODB_BACKUP_PASSWORD + local value=test-password + local tmp_json ++ mktemp + tmp_json=/tmp/tmp.NiGtutDBU6 + local new_tmp_json ++ mktemp + new_tmp_json=/tmp/tmp.mmvfawojeN + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' ++ mktemp + jq .data.data + local LAST_OUT=/tmp/tmp.Vl7vsNQpHZ ++ mktemp + local LAST_ERR=/tmp/tmp.tun7eUDDci + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Vl7vsNQpHZ + cat /tmp/tmp.tun7eUDDci + rm /tmp/tmp.Vl7vsNQpHZ /tmp/tmp.tun7eUDDci + return 0 + '[' '!' -s /tmp/tmp.NiGtutDBU6 ']' + jq --arg key MONGODB_BACKUP_PASSWORD --arg value test-password '(. // {}) + {($key): $value}' /tmp/tmp.NiGtutDBU6 + kubectl_bin cp /tmp/tmp.mmvfawojeN users-vault-service-0:/tmp/data_new.json ++ mktemp + local LAST_OUT=/tmp/tmp.tBwxg4smxm ++ mktemp + local LAST_ERR=/tmp/tmp.twjgvt7Huc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /tmp/tmp.mmvfawojeN users-vault-service-0:/tmp/data_new.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tBwxg4smxm + cat /tmp/tmp.twjgvt7Huc + rm /tmp/tmp.tBwxg4smxm /tmp/tmp.twjgvt7Huc + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' ++ mktemp + local LAST_OUT=/tmp/tmp.idoAJOlKIX ++ mktemp + local LAST_ERR=/tmp/tmp.PfHg1lCbQZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.idoAJOlKIX ======================= Secret Path ======================= secret/data/psmdb/operator/users-vault-7177/some-name/users ======= Metadata ======= Key Value --- ----- created_time 2026-01-21T14:25:57.665535714Z custom_metadata deletion_time n/a destroyed false version 4 + cat /tmp/tmp.PfHg1lCbQZ + rm /tmp/tmp.idoAJOlKIX /tmp/tmp.PfHg1lCbQZ + return 0 + rm -f /tmp/tmp.NiGtutDBU6 /tmp/tmp.mmvfawojeN + sleep 25 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qZtteJ0Pqk +++ mktemp ++ local LAST_ERR=/tmp/tmp.U4vY1lsv3c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qZtteJ0Pqk ++ cat /tmp/tmp.U4vY1lsv3c ++ rm /tmp/tmp.qZtteJ0Pqk /tmp/tmp.U4vY1lsv3c ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo 
.OK .OK + sleep 15 ++ getUserData some-users MONGODB_BACKUP_USER ++ local secretName=some-users ++ local dataKey=MONGODB_BACKUP_USER +++ getSecretData some-users MONGODB_BACKUP_USER +++ local secretName=some-users +++ local dataKey=MONGODB_BACKUP_USER ++++ kubectl get secrets/some-users '--template={{.data.MONGODB_BACKUP_USER}}' ++++ base64 -d +++ local data=backup +++ echo backup ++ urlencode backup ++ uri=backup ++ echo -n backup ++ jq -s -R -r @uri + user=backup + check_mongo_auth backup:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local uri=backup:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' backup:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=backup:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ backup:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oTlkWyitw5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xiC1XCXrr7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oTlkWyitw5 +++ cat /tmp/tmp.xiC1XCXrr7 +++ rm /tmp/tmp.oTlkWyitw5 /tmp/tmp.xiC1XCXrr7 +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://backup:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cbVvY1x1Wn +++ mktemp ++ local LAST_ERR=/tmp/tmp.PI8MFRJkaw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://backup:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cbVvY1x1Wn ++ cat /tmp/tmp.PI8MFRJkaw ++ rm /tmp/tmp.cbVvY1x1Wn /tmp/tmp.PI8MFRJkaw ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth backup:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=backup:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' backup:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=backup:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local 
replica_set=rs0 ++ [[ backup:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rWQTprvpfi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ODJSBTqLO4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rWQTprvpfi +++ cat /tmp/tmp.ODJSBTqLO4 +++ rm /tmp/tmp.rWQTprvpfi /tmp/tmp.ODJSBTqLO4 +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://backup:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kp1rl0yb0s +++ mktemp ++ local LAST_ERR=/tmp/tmp.6yhm6sWRUk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://backup:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kp1rl0yb0s ++ cat /tmp/tmp.6yhm6sWRUk ++ rm /tmp/tmp.kp1rl0yb0s /tmp/tmp.6yhm6sWRUk ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth backup:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=backup:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' backup:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=backup:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ backup:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UKk68uVygC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KeP72WpnAj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UKk68uVygC +++ cat /tmp/tmp.KeP72WpnAj +++ rm /tmp/tmp.UKk68uVygC /tmp/tmp.KeP72WpnAj +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf 
'\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://backup:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CBF0Y5rBm0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qMOFeVZeAN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://backup:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CBF0Y5rBm0 ++ cat /tmp/tmp.qMOFeVZeAN ++ rm /tmp/tmp.CBF0Y5rBm0 /tmp/tmp.qMOFeVZeAN ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'change MONGODB_BACKUP_USER' + set +o xtrace ----------------------------------------------------------------------------------- change MONGODB_BACKUP_USER ----------------------------------------------------------------------------------- + newname=someBackupUser + vault_append MONGODB_BACKUP_USER someBackupUser + local key=MONGODB_BACKUP_USER + local value=someBackupUser + local tmp_json ++ mktemp + tmp_json=/tmp/tmp.8k12wramsQ + local new_tmp_json ++ mktemp + new_tmp_json=/tmp/tmp.SbhnkKeu10 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + jq .data.data ++ mktemp + local LAST_OUT=/tmp/tmp.fikonbJqRx ++ mktemp + local LAST_ERR=/tmp/tmp.RtZihN4nHp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fikonbJqRx + cat /tmp/tmp.RtZihN4nHp + rm /tmp/tmp.fikonbJqRx /tmp/tmp.RtZihN4nHp + return 0 + '[' '!' -s /tmp/tmp.8k12wramsQ ']' + jq --arg key MONGODB_BACKUP_USER --arg value someBackupUser '(. 
// {}) + {($key): $value}' /tmp/tmp.8k12wramsQ + kubectl_bin cp /tmp/tmp.SbhnkKeu10 users-vault-service-0:/tmp/data_new.json ++ mktemp + local LAST_OUT=/tmp/tmp.PVKY1bu0QY ++ mktemp + local LAST_ERR=/tmp/tmp.rCcOaDk3QA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /tmp/tmp.SbhnkKeu10 users-vault-service-0:/tmp/data_new.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PVKY1bu0QY + cat /tmp/tmp.rCcOaDk3QA + rm /tmp/tmp.PVKY1bu0QY /tmp/tmp.rCcOaDk3QA + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' ++ mktemp + local LAST_OUT=/tmp/tmp.FTJzGUITIj ++ mktemp + local LAST_ERR=/tmp/tmp.8etpas8PYy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FTJzGUITIj ======================= Secret Path ======================= secret/data/psmdb/operator/users-vault-7177/some-name/users ======= Metadata ======= Key Value --- ----- created_time 2026-01-21T14:26:59.19495867Z custom_metadata deletion_time n/a destroyed false version 5 + cat /tmp/tmp.8etpas8PYy + rm /tmp/tmp.FTJzGUITIj /tmp/tmp.8etpas8PYy + return 0 + rm -f /tmp/tmp.8k12wramsQ /tmp/tmp.SbhnkKeu10 + sleep 25 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ys380TO8ku +++ mktemp ++ local LAST_ERR=/tmp/tmp.CGZdwopFuO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ys380TO8ku ++ cat /tmp/tmp.CGZdwopFuO ++ rm /tmp/tmp.Ys380TO8ku /tmp/tmp.CGZdwopFuO ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + sleep 15 + check_mongo_auth someBackupUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local uri=someBackupUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someBackupUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=someBackupUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someBackupUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.W95AJ58h9e ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kvhJrzJCks +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.W95AJ58h9e +++ cat /tmp/tmp.kvhJrzJCks +++ rm /tmp/tmp.W95AJ58h9e /tmp/tmp.kvhJrzJCks +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someBackupUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pWJqXIXSYn +++ mktemp ++ local LAST_ERR=/tmp/tmp.4tShyHMd2I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someBackupUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pWJqXIXSYn ++ cat /tmp/tmp.4tShyHMd2I ++ rm /tmp/tmp.pWJqXIXSYn /tmp/tmp.4tShyHMd2I ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth someBackupUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=someBackupUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someBackupUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=someBackupUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someBackupUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vLrUBREL0u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.A81WYHNECz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vLrUBREL0u +++ cat /tmp/tmp.A81WYHNECz +++ rm /tmp/tmp.vLrUBREL0u /tmp/tmp.A81WYHNECz +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someBackupUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N8hd346QSQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.SqMx5Xsajr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someBackupUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.N8hd346QSQ ++ cat /tmp/tmp.SqMx5Xsajr ++ rm /tmp/tmp.N8hd346QSQ /tmp/tmp.SqMx5Xsajr ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth someBackupUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=someBackupUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someBackupUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local uri=someBackupUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someBackupUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sxtnqS0jJw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2s32nqw00F +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.sxtnqS0jJw +++ cat /tmp/tmp.2s32nqw00F +++ rm /tmp/tmp.sxtnqS0jJw /tmp/tmp.2s32nqw00F +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someBackupUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.javKV878oG +++ mktemp ++ local LAST_ERR=/tmp/tmp.3kx28mz9Wu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someBackupUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.javKV878oG ++ cat /tmp/tmp.3kx28mz9Wu ++ rm /tmp/tmp.javKV878oG /tmp/tmp.3kx28mz9Wu ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'change MONGODB_CLUSTER_ADMIN_PASSWORD' + set +o xtrace ----------------------------------------------------------------------------------- change MONGODB_CLUSTER_ADMIN_PASSWORD ----------------------------------------------------------------------------------- + vault_append MONGODB_CLUSTER_ADMIN_PASSWORD test-password + local key=MONGODB_CLUSTER_ADMIN_PASSWORD + local value=test-password + local tmp_json ++ mktemp + tmp_json=/tmp/tmp.arFYBBTlYU + local new_tmp_json ++ mktemp + new_tmp_json=/tmp/tmp.C0o35LGD1G + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv get 
-format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + jq .data.data ++ mktemp + local LAST_OUT=/tmp/tmp.nnNrPoHls9 ++ mktemp + local LAST_ERR=/tmp/tmp.QIqngsqLnF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nnNrPoHls9 + cat /tmp/tmp.QIqngsqLnF + rm /tmp/tmp.nnNrPoHls9 /tmp/tmp.QIqngsqLnF + return 0 + '[' '!' -s /tmp/tmp.arFYBBTlYU ']' + jq --arg key MONGODB_CLUSTER_ADMIN_PASSWORD --arg value test-password '(. // {}) + {($key): $value}' /tmp/tmp.arFYBBTlYU + kubectl_bin cp /tmp/tmp.C0o35LGD1G users-vault-service-0:/tmp/data_new.json ++ mktemp + local LAST_OUT=/tmp/tmp.ietwsRAA4x ++ mktemp + local LAST_ERR=/tmp/tmp.SWJnES1rvl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /tmp/tmp.C0o35LGD1G users-vault-service-0:/tmp/data_new.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ietwsRAA4x + cat /tmp/tmp.SWJnES1rvl + rm /tmp/tmp.ietwsRAA4x /tmp/tmp.SWJnES1rvl + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' ++ mktemp + local LAST_OUT=/tmp/tmp.IMkCfigHw6 ++ mktemp + local LAST_ERR=/tmp/tmp.a0QWIIb0s5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IMkCfigHw6 ======================= Secret Path ======================= secret/data/psmdb/operator/users-vault-7177/some-name/users ======= Metadata ======= Key Value --- ----- created_time 2026-01-21T14:27:59.207460851Z custom_metadata deletion_time n/a destroyed false version 6 + cat /tmp/tmp.a0QWIIb0s5 + rm /tmp/tmp.IMkCfigHw6 /tmp/tmp.a0QWIIb0s5 + return 0 + rm -f /tmp/tmp.arFYBBTlYU /tmp/tmp.C0o35LGD1G + sleep 25 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RtZ0FCxpmD +++ mktemp ++ local LAST_ERR=/tmp/tmp.HiQIjX9DV3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RtZ0FCxpmD ++ cat /tmp/tmp.HiQIjX9DV3 ++ rm /tmp/tmp.RtZ0FCxpmD /tmp/tmp.HiQIjX9DV3 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + sleep 15 ++ getUserData some-users MONGODB_CLUSTER_ADMIN_USER ++ local secretName=some-users ++ local dataKey=MONGODB_CLUSTER_ADMIN_USER +++ getSecretData some-users MONGODB_CLUSTER_ADMIN_USER +++ local secretName=some-users +++ local dataKey=MONGODB_CLUSTER_ADMIN_USER ++++ kubectl get secrets/some-users '--template={{.data.MONGODB_CLUSTER_ADMIN_USER}}' ++++ base64 -d +++ local data=clusterAdmin +++ echo clusterAdmin ++ urlencode clusterAdmin ++ uri=clusterAdmin ++ echo -n clusterAdmin ++ jq -s -R -r @uri + user=clusterAdmin + check_mongo_auth clusterAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local 
uri=clusterAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' clusterAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=clusterAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ clusterAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eIk00pjgcw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TFFxOwWakX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eIk00pjgcw +++ cat /tmp/tmp.TFFxOwWakX +++ rm /tmp/tmp.eIk00pjgcw /tmp/tmp.TFFxOwWakX +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3uRtLesVLe +++ mktemp ++ local LAST_ERR=/tmp/tmp.7rAEk1ZRM9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterAdmin:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3uRtLesVLe ++ cat /tmp/tmp.7rAEk1ZRM9 ++ rm /tmp/tmp.3uRtLesVLe /tmp/tmp.7rAEk1ZRM9 ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth clusterAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=clusterAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' clusterAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=clusterAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ clusterAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MeidErBZUg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uBNJgjasHh 
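Most of the volume in this log comes from the kubectl_bin wrapper rather than from the test logic itself: every kubectl call is bracketed by two mktemp files (LAST_OUT and LAST_ERR), a seq 0 2 retry loop, and set +e / set -e toggling, and each extra '+' in the trace prefix marks one more level of nested command substitution inside those wrappers. A stripped-down sketch of what the wrapper appears to do, based on the repeated pattern above; the between-retry sleep and the final error handling are assumptions:

    # kubectl_bin: run kubectl, retry up to three times, echo the captured output
    kubectl_bin() {
        local out err status i
        out=$(mktemp); err=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" > "$out" 2> "$err"
            status=$?
            set -e
            [ "$status" -eq 0 ] && break
            sleep 1    # assumed back-off; the real helper's delay is not visible here
        done
        cat "$out"
        cat "$err" >&2
        rm -f "$out" "$err"
        return "$status"
    }

Read with this in mind, the lines worth scanning are the ones printed by cat "$out", for example the Vault Metadata tables and the perconaservermongodb.psmdb.percona.com/some-name patched confirmation; everything else is wrapper scaffolding.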
+++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MeidErBZUg +++ cat /tmp/tmp.uBNJgjasHh +++ rm /tmp/tmp.MeidErBZUg /tmp/tmp.uBNJgjasHh +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NHCFDqWSEu +++ mktemp ++ local LAST_ERR=/tmp/tmp.9xgAf9hnTG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterAdmin:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NHCFDqWSEu ++ cat /tmp/tmp.9xgAf9hnTG ++ rm /tmp/tmp.NHCFDqWSEu /tmp/tmp.9xgAf9hnTG ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth clusterAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=clusterAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' clusterAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local uri=clusterAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ clusterAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2VCOpdQDhQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YKw7RDSPHt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2VCOpdQDhQ +++ cat /tmp/tmp.YKw7RDSPHt +++ rm /tmp/tmp.2VCOpdQDhQ /tmp/tmp.YKw7RDSPHt +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0c9ix4y6cC +++ mktemp ++ local LAST_ERR=/tmp/tmp.KMQKu5piiI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 
1 }).ok\n'\'' | mongo mongodb://clusterAdmin:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0c9ix4y6cC ++ cat /tmp/tmp.KMQKu5piiI ++ rm /tmp/tmp.0c9ix4y6cC /tmp/tmp.KMQKu5piiI ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'change MONGODB_CLUSTER_MONITOR_PASSWORD' + set +o xtrace ----------------------------------------------------------------------------------- change MONGODB_CLUSTER_MONITOR_PASSWORD ----------------------------------------------------------------------------------- + vault_append MONGODB_CLUSTER_MONITOR_PASSWORD test-password + local key=MONGODB_CLUSTER_MONITOR_PASSWORD + local value=test-password + local tmp_json ++ mktemp + tmp_json=/tmp/tmp.vb6oqa6HIS + local new_tmp_json ++ mktemp + new_tmp_json=/tmp/tmp.fCvM6oF2hH + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + jq .data.data ++ mktemp + local LAST_OUT=/tmp/tmp.TeHWgI3KwQ ++ mktemp + local LAST_ERR=/tmp/tmp.VzVbhPmK0Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv get -format=json -mount=secret psmdb/operator/users-vault-7177/some-name/users' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TeHWgI3KwQ + cat /tmp/tmp.VzVbhPmK0Y + rm /tmp/tmp.TeHWgI3KwQ /tmp/tmp.VzVbhPmK0Y + return 0 + '[' '!' -s /tmp/tmp.vb6oqa6HIS ']' + jq --arg key MONGODB_CLUSTER_MONITOR_PASSWORD --arg value test-password '(. 
// {}) + {($key): $value}' /tmp/tmp.vb6oqa6HIS + kubectl_bin cp /tmp/tmp.fCvM6oF2hH users-vault-service-0:/tmp/data_new.json ++ mktemp + local LAST_OUT=/tmp/tmp.8W7CIP3Evk ++ mktemp + local LAST_ERR=/tmp/tmp.fSc8TsXe0h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl cp /tmp/tmp.fCvM6oF2hH users-vault-service-0:/tmp/data_new.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8W7CIP3Evk + cat /tmp/tmp.fSc8TsXe0h + rm /tmp/tmp.8W7CIP3Evk /tmp/tmp.fSc8TsXe0h + return 0 + kubectl_bin exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' ++ mktemp + local LAST_OUT=/tmp/tmp.gKdMCTa58x ++ mktemp + local LAST_ERR=/tmp/tmp.fKHJKFo5MU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec users-vault-service-0 -- sh -c 'vault kv put -mount=secret psmdb/operator/users-vault-7177/some-name/users @"/tmp/data_new.json"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gKdMCTa58x ======================= Secret Path ======================= secret/data/psmdb/operator/users-vault-7177/some-name/users ======= Metadata ======= Key Value --- ----- created_time 2026-01-21T14:29:01.72326441Z custom_metadata deletion_time n/a destroyed false version 7 + cat /tmp/tmp.fKHJKFo5MU + rm /tmp/tmp.gKdMCTa58x /tmp/tmp.fKHJKFo5MU + return 0 + rm -f /tmp/tmp.vb6oqa6HIS /tmp/tmp.fCvM6oF2hH + sleep 25 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PABLzGBa39 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VASDRUMN5l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PABLzGBa39 ++ cat /tmp/tmp.VASDRUMN5l ++ rm /tmp/tmp.PABLzGBa39 /tmp/tmp.VASDRUMN5l ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + sleep 15 ++ getUserData some-users MONGODB_CLUSTER_MONITOR_USER ++ local secretName=some-users ++ local dataKey=MONGODB_CLUSTER_MONITOR_USER +++ getSecretData some-users MONGODB_CLUSTER_MONITOR_USER +++ local secretName=some-users +++ local dataKey=MONGODB_CLUSTER_MONITOR_USER ++++ kubectl get secrets/some-users '--template={{.data.MONGODB_CLUSTER_MONITOR_USER}}' ++++ base64 -d +++ local data=clusterMonitor +++ echo clusterMonitor ++ urlencode clusterMonitor ++ uri=clusterMonitor ++ echo -n clusterMonitor ++ jq -s -R -r @uri + user=clusterMonitor + check_mongo_auth clusterMonitor:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local uri=clusterMonitor:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' clusterMonitor:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=clusterMonitor:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ clusterMonitor:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to 
reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eSYGwx6gpV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RETIdSPGb0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eSYGwx6gpV +++ cat /tmp/tmp.RETIdSPGb0 +++ rm /tmp/tmp.eSYGwx6gpV /tmp/tmp.RETIdSPGb0 +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterMonitor:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AqoCyGLhX0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kdFGSaEqnz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterMonitor:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AqoCyGLhX0 ++ cat /tmp/tmp.kdFGSaEqnz ++ rm /tmp/tmp.AqoCyGLhX0 /tmp/tmp.kdFGSaEqnz ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth clusterMonitor:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=clusterMonitor:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' clusterMonitor:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=clusterMonitor:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ clusterMonitor:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JA9VWJBW5r ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gQXIVmJVw8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JA9VWJBW5r +++ cat /tmp/tmp.gQXIVmJVw8 +++ rm /tmp/tmp.JA9VWJBW5r /tmp/tmp.gQXIVmJVw8 +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo 
mongodb://clusterMonitor:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TlG0eQKTyp +++ mktemp ++ local LAST_ERR=/tmp/tmp.DcNVwqN84d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterMonitor:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TlG0eQKTyp ++ cat /tmp/tmp.DcNVwqN84d ++ rm /tmp/tmp.TlG0eQKTyp /tmp/tmp.DcNVwqN84d ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth clusterMonitor:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=clusterMonitor:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' clusterMonitor:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=clusterMonitor:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ clusterMonitor:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xIFZfCPnsR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Mqpb7aBv7i +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xIFZfCPnsR +++ cat /tmp/tmp.Mqpb7aBv7i +++ rm /tmp/tmp.xIFZfCPnsR /tmp/tmp.Mqpb7aBv7i +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterMonitor:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WiamZvpU7Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.bzsyGw8yrd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://clusterMonitor:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WiamZvpU7Z ++ cat /tmp/tmp.bzsyGw8yrd ++ rm /tmp/tmp.WiamZvpU7Z /tmp/tmp.bzsyGw8yrd ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return 
----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + desc 'remove users secret' + set +o xtrace ----------------------------------------------------------------------------------- remove users secret ----------------------------------------------------------------------------------- + kubectl_bin delete secret some-users ++ mktemp + local LAST_OUT=/tmp/tmp.1s7p2uUeX7 ++ mktemp + local LAST_ERR=/tmp/tmp.lRENUH8oyO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete secret some-users + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1s7p2uUeX7 secret "some-users" deleted from users-vault-7177 namespace + cat /tmp/tmp.lRENUH8oyO + rm /tmp/tmp.1s7p2uUeX7 /tmp/tmp.lRENUH8oyO + return 0 + sleep 35 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eKegHrnuwl +++ mktemp ++ local LAST_ERR=/tmp/tmp.P1KBzCuUiQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eKegHrnuwl ++ cat /tmp/tmp.P1KBzCuUiQ ++ rm /tmp/tmp.eKegHrnuwl /tmp/tmp.P1KBzCuUiQ ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + sleep 15 ++ getUserData some-users MONGODB_USER_ADMIN_USER ++ local secretName=some-users ++ local dataKey=MONGODB_USER_ADMIN_USER +++ getSecretData some-users MONGODB_USER_ADMIN_USER +++ local secretName=some-users +++ local dataKey=MONGODB_USER_ADMIN_USER ++++ kubectl get secrets/some-users '--template={{.data.MONGODB_USER_ADMIN_USER}}' ++++ base64 -d +++ local data=someUserAdminUser +++ echo someUserAdminUser ++ urlencode someUserAdminUser ++ uri=someUserAdminUser ++ echo -n someUserAdminUser ++ jq -s -R -r @uri + user=someUserAdminUser ++ getUserData some-users MONGODB_USER_ADMIN_PASSWORD ++ local secretName=some-users ++ local dataKey=MONGODB_USER_ADMIN_PASSWORD +++ getSecretData some-users MONGODB_USER_ADMIN_PASSWORD +++ local secretName=some-users +++ local dataKey=MONGODB_USER_ADMIN_PASSWORD ++++ kubectl get secrets/some-users '--template={{.data.MONGODB_USER_ADMIN_PASSWORD}}' ++++ base64 -d +++ local data=test-password +++ echo test-password ++ urlencode test-password ++ uri=test-password ++ echo -n test-password ++ jq -s -R -r @uri + pass=test-password + check_mongo_auth someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 + local uri=someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' ++ local replica_set=rs0 ++ [[ someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177 == *cfg* ]] +++ kubectl_bin get pods --selector=name=psmdb-client 
-o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.V7IrHohrPE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BlVJM8Y2Vk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.V7IrHohrPE +++ cat /tmp/tmp.BlVJM8Y2Vk +++ rm /tmp/tmp.V7IrHohrPE /tmp/tmp.BlVJM8Y2Vk +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XDhB2sMqwn +++ mktemp ++ local LAST_ERR=/tmp/tmp.sV03i0tJkc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-0.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XDhB2sMqwn ++ cat /tmp/tmp.sV03i0tJkc ++ rm /tmp/tmp.XDhB2sMqwn /tmp/tmp.sV03i0tJkc ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 + local uri=someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Sz2iR2F7iI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kjJWcN7SQO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Sz2iR2F7iI +++ cat /tmp/tmp.kjJWcN7SQO +++ rm /tmp/tmp.Sz2iR2F7iI /tmp/tmp.kjJWcN7SQO +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aZXYZtoD75 +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.Lwl38jotCg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-1.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aZXYZtoD75 ++ cat /tmp/tmp.Lwl38jotCg ++ rm /tmp/tmp.aZXYZtoD75 /tmp/tmp.Lwl38jotCg ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + check_mongo_auth someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 + local uri=someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ run_mongo 'db.runCommand({ ping: 1 }).ok' someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 mongodb '' --quiet ++ local 'command=db.runCommand({ ping: 1 }).ok' ++ local uri=someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag=--quiet ++ local replica_set=rs0 ++ [[ someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177 == *cfg* ]] ++ grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kSRGnPYJay ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rAKqWQyQCN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kSRGnPYJay +++ cat /tmp/tmp.rAKqWQyQCN +++ rm /tmp/tmp.kSRGnPYJay /tmp/tmp.rAKqWQyQCN +++ return 0 ++ local client_container=psmdb-client-696897d69b-m57w2 ++ kubectl_bin exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SR1nmIiJpS +++ mktemp ++ local LAST_ERR=/tmp/tmp.JtdXr7M3nF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-m57w2 -- bash -c 'printf '\''db.runCommand({ ping: 1 }).ok\n'\'' | mongo mongodb://someUserAdminUser:test-password@some-name-rs0-2.some-name-rs0.users-vault-7177.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 --quiet' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SR1nmIiJpS ++ cat /tmp/tmp.JtdXr7M3nF ++ rm /tmp/tmp.SR1nmIiJpS /tmp/tmp.JtdXr7M3nF ++ return 0 + ping=1 + desc 'ping return' + set +o xtrace ----------------------------------------------------------------------------------- ping return ----------------------------------------------------------------------------------- + '[' 1 '!=' 1 ']' + [[ someUserAdminUser != \s\o\m\e\U\s\e\r\A\d\m\i\n\U\s\e\r ]] + [[ test-password != \t\e\s\t\-\p\a\s\s\w\o\r\d ]] + 
destroy_vault users-vault-service + local name=users-vault-service + local vault_ns ++ helm list --all-namespaces --filter users-vault-service ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + vault_ns=users-vault-7177 + desc 'destroy vault' + set +o xtrace ----------------------------------------------------------------------------------- destroy vault ----------------------------------------------------------------------------------- ++ kubectl api-resources ++ grep vault ++ awk '{print $1}' + '[' -n users-vault-7177 ']' + helm uninstall users-vault-service --namespace users-vault-7177 release "users-vault-service" uninstalled ++ kubectl get clusterrolebinding -l app.kubernetes.io/instance=users-vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole -l app.kubernetes.io/instance=users-vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : ++ kubectl get mutatingwebhookconfiguration -l app.kubernetes.io/instance=users-vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete mutatingwebhookconfiguration error: resource(s) were provided, but no name was specified + : + destroy users-vault-7177 + local namespace=users-vault-7177 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ wc -l ++ kubectl_bin get psmdb-backup --no-headers +++ mktemp ++ local LAST_OUT=/tmp/tmp.T8SPcjHYwq +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gfhkf7fI2o ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T8SPcjHYwq ++ cat /tmp/tmp.Gfhkf7fI2o No resources found in users-vault-7177 namespace. 
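# Note: every kubectl call in this log runs through the test suite's kubectl_bin wrapper,
# which is why each command is surrounded by mktemp/LAST_OUT/LAST_ERR/seq 0 2 lines.
# Below is a minimal sketch of what that wrapper appears to do, reconstructed only from
# this trace. The retry count of 3, the 4-second back-off step, and the exact error
# handling are assumptions, not the suite's actual implementation.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        # Run the real kubectl, capturing stdout/stderr into the temp files seen in the trace.
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        # Failed attempt: surface what kubectl said, then back off before retrying
        # (the trace shows "sleep 0", then "sleep 4" between attempts).
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        sleep $((timeout * i))
    done
    # Print the final attempt's output before cleaning up, as the trace does.
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}
# Example usage matching a call from this log:
#   kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}'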
++ rm /tmp/tmp.T8SPcjHYwq /tmp/tmp.Gfhkf7fI2o ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.grA9m1C9Fu ++ mktemp + local LAST_ERR=/tmp/tmp.BNJSJsRe5F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.grA9m1C9Fu customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.BNJSJsRe5F + rm /tmp/tmp.grA9m1C9Fu /tmp/tmp.BNJSJsRe5F + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.milpoiJkbJ ++ mktemp + local LAST_ERR=/tmp/tmp.xH8j405cpt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.milpoiJkbJ + cat /tmp/tmp.xH8j405cpt + rm /tmp/tmp.milpoiJkbJ /tmp/tmp.xH8j405cpt + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.GlecEer8A5 ++ mktemp + local LAST_ERR=/tmp/tmp.NnjeCerSh9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.GlecEer8A5 + cat /tmp/tmp.NnjeCerSh9 + rm /tmp/tmp.GlecEer8A5 /tmp/tmp.NnjeCerSh9 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.p9XXVKGmQq ++ mktemp + local LAST_ERR=/tmp/tmp.HR3KPD3naf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p9XXVKGmQq + cat /tmp/tmp.HR3KPD3naf + rm /tmp/tmp.p9XXVKGmQq /tmp/tmp.HR3KPD3naf + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.qs766qvmUb ++ mktemp + local LAST_ERR=/tmp/tmp.f9wi333g3n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2205/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qs766qvmUb clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.f9wi333g3n + rm /tmp/tmp.qs766qvmUb /tmp/tmp.f9wi333g3n + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.dk5c1mKTXS ++ mktemp + local LAST_ERR=/tmp/tmp.TRf2d3qZob + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dk5c1mKTXS + cat /tmp/tmp.TRf2d3qZob Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dk5c1mKTXS + cat /tmp/tmp.TRf2d3qZob Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dk5c1mKTXS + cat 
/tmp/tmp.TRf2d3qZob Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.dk5c1mKTXS + cat /tmp/tmp.TRf2d3qZob Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.dk5c1mKTXS /tmp/tmp.TRf2d3qZob + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace users-vault-7177 + rm -rf /tmp/tmp.MHRGD1kyMd + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.YS2i6pNbfJ + local LAST_OUT=/tmp/tmp.XDqKBzAdWC ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Jn45blkzd6 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.jh3MmdD0pj + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace users-vault-7177 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator