Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/logs/data-at-rest-encryption.log Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + create_infra data-at-rest-encryption-28610 + local ns=data-at-rest-encryption-28610 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.rB78N0ehwv ++ mktemp + local LAST_ERR=/tmp/tmp.Z8iMk6O0eo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rB78N0ehwv customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.Z8iMk6O0eo + rm /tmp/tmp.rB78N0ehwv /tmp/tmp.Z8iMk6O0eo + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ALbW24jkIi ++ mktemp + local LAST_ERR=/tmp/tmp.LHVXv9YGTm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ALbW24jkIi customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com condition met + cat /tmp/tmp.LHVXv9YGTm + rm /tmp/tmp.ALbW24jkIi /tmp/tmp.LHVXv9YGTm + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p 
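# ---------------------------------------------------------------------------
# delete_crd above strips finalizers from any leftover custom resources before
# waiting for the CRDs to disappear, so deletion cannot hang on a finalizer no
# controller will ever process. A minimal sketch reconstructed from this trace
# (the real helper lives in e2e-tests/functions; names mirror the trace, not
# the verbatim source):
for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
    # patch every instance in every namespace to an empty finalizer list;
    # `|| :` tolerates the "no name was specified" errors seen above, which
    # happen when "No resources found" is piped into xargs
    kubectl get "${crd_name}" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc "kubectl patch ${crd_name} -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :
    kubectl wait --for=delete "crd/${crd_name}" || :
done
# Writing the filter as `grep -v -- '---'` instead of `grep -v '\-\-\-'` would
# also silence the "stray \ before -" warnings printed above.
# ---------------------------------------------------------------------------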
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.zDkanKokNE ++ mktemp + local LAST_ERR=/tmp/tmp.VRJUEEW2A4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zDkanKokNE + cat /tmp/tmp.VRJUEEW2A4 + rm /tmp/tmp.zDkanKokNE /tmp/tmp.VRJUEEW2A4 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.tg64D5XuRx ++ mktemp + local LAST_ERR=/tmp/tmp.RThksnh2Wb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tg64D5XuRx + cat /tmp/tmp.RThksnh2Wb + rm /tmp/tmp.tg64D5XuRx /tmp/tmp.RThksnh2Wb + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.y17TQsgeSH ++ mktemp + local LAST_ERR=/tmp/tmp.E44Tq12RJR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y17TQsgeSH clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.E44Tq12RJR + rm /tmp/tmp.y17TQsgeSH /tmp/tmp.E44Tq12RJR + return 0 + check_crd_for_deletion PR-2207-d97a62a5 + local git_tag=PR-2207-d97a62a5 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2207-d97a62a5/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7Ebq6l72zM +++ mktemp ++ local LAST_ERR=/tmp/tmp.PXRwmKGsIZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.7Ebq6l72zM ++ cat /tmp/tmp.PXRwmKGsIZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ 
sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.7Ebq6l72zM ++ cat /tmp/tmp.PXRwmKGsIZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.7Ebq6l72zM ++ cat /tmp/tmp.PXRwmKGsIZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.7Ebq6l72zM ++ cat /tmp/tmp.PXRwmKGsIZ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.7Ebq6l72zM /tmp/tmp.PXRwmKGsIZ ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator++ mktemp ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.KPT20UKSb0 ++ mktemp + local LAST_OUT=/tmp/tmp.Iq4nVM04r2 ++ mktemp + local LAST_ERR=/tmp/tmp.waWtjMz01G + local 
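# ---------------------------------------------------------------------------
# The kubectl_bin wrapper that dominates this trace retries each kubectl call
# up to three times with the 0s/4s/8s backoff visible just above (sleep 0,
# sleep 4, sleep 8), buffering stdout/stderr in mktemp files. A rough sketch
# inferred from the visible behavior, not the verbatim e2e-tests helper:
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
        exit_status=$?
        set -e
        if [ "${exit_status}" -eq 0 ]; then
            break
        fi
        sleep $((timeout * i)) # 0, 4, then 8 seconds between attempts
    done
    cat "${LAST_OUT}"
    cat "${LAST_ERR}" >&2
    rm "${LAST_OUT}" "${LAST_ERR}"
    return "${exit_status}"
}
# ---------------------------------------------------------------------------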
exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.kb0dT4zhbw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Iq4nVM04r2 + cat /tmp/tmp.kb0dT4zhbw + rm /tmp/tmp.Iq4nVM04r2 /tmp/tmp.kb0dT4zhbw + return 0 namespace "data-at-rest-encryption-18973" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KPT20UKSb0 namespace "psmdb-operator" deleted + cat /tmp/tmp.waWtjMz01G + rm /tmp/tmp.KPT20UKSb0 /tmp/tmp.waWtjMz01G + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fkXUe1mD9Y ++ mktemp + local LAST_ERR=/tmp/tmp.VUpECReb6h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fkXUe1mD9Y + cat /tmp/tmp.VUpECReb6h + rm /tmp/tmp.fkXUe1mD9Y /tmp/tmp.VUpECReb6h + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.oQWEGB8dZe ++ mktemp + local LAST_ERR=/tmp/tmp.18sGDR4YwI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oQWEGB8dZe namespace/psmdb-operator created + cat /tmp/tmp.18sGDR4YwI + rm /tmp/tmp.oQWEGB8dZe /tmp/tmp.18sGDR4YwI + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.D3mZiKtua1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LNn0yJE7vB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D3mZiKtua1 ++ cat /tmp/tmp.LNn0yJE7vB ++ rm /tmp/tmp.D3mZiKtua1 /tmp/tmp.LNn0yJE7vB ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2207-d97a62a5-8-cluster2 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Qzuk3mRvC8 ++ mktemp + local LAST_ERR=/tmp/tmp.CKFOJpKPsF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2207-d97a62a5-8-cluster2 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qzuk3mRvC8 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2207-d97a62a5-8-cluster2" modified. 
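# ---------------------------------------------------------------------------
# set_kube_ctx, as traced above, just repoints the current kubeconfig context
# at the given namespace; roughly this minimal sketch:
set_kube_ctx() {
    local namespace=$1
    kubectl config set-context "$(kubectl config current-context)" --namespace="${namespace}"
}
# ---------------------------------------------------------------------------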
+ cat /tmp/tmp.CKFOJpKPsF + rm /tmp/tmp.Qzuk3mRvC8 /tmp/tmp.CKFOJpKPsF + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2207-d97a62a5' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2207-d97a62a5 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.YFIZXeVUkJ ++ mktemp + local LAST_ERR=/tmp/tmp.GBSW4nZaRw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YFIZXeVUkJ customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.GBSW4nZaRw + rm /tmp/tmp.YFIZXeVUkJ /tmp/tmp.GBSW4nZaRw + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.emvn8QVNMT ++ mktemp + local LAST_ERR=/tmp/tmp.eEbo1Wae3p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.emvn8QVNMT clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.eEbo1Wae3p + rm /tmp/tmp.emvn8QVNMT /tmp/tmp.eEbo1Wae3p + return 0 + kubectl_bin apply -n psmdb-operator -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2207-d97a62a5") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gftXPdR81J ++ mktemp + local LAST_ERR=/tmp/tmp.hwJYBpgsHP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gftXPdR81J deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.hwJYBpgsHP + rm /tmp/tmp.gftXPdR81J /tmp/tmp.hwJYBpgsHP + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.M3DsZIoENF +++ mktemp ++ local LAST_ERR=/tmp/tmp.A8YNzVpKH5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M3DsZIoENF ++ cat /tmp/tmp.A8YNzVpKH5 ++ rm /tmp/tmp.M3DsZIoENF /tmp/tmp.A8YNzVpKH5 ++ return 0 + wait_operator_pod percona-server-mongodb-operator-7dc7d87dd7-c8zkn + local pod=percona-server-mongodb-operator-7dc7d87dd7-c8zkn + set +o xtrace waiting for pod/percona-server-mongodb-operator-7dc7d87dd7-c8zkn to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.SywoT6F50K +++ mktemp ++ local LAST_ERR=/tmp/tmp.iZ4Pn8Lm4q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SywoT6F50K ++ cat /tmp/tmp.iZ4Pn8Lm4q ++ rm /tmp/tmp.SywoT6F50K /tmp/tmp.iZ4Pn8Lm4q ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-7dc7d87dd7-c8zkn ++ mktemp + local LAST_OUT=/tmp/tmp.ZIP3K7PWpU ++ mktemp + local LAST_ERR=/tmp/tmp.GjvbhZskIk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-7dc7d87dd7-c8zkn + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZIP3K7PWpU + cat /tmp/tmp.GjvbhZskIk + rm /tmp/tmp.ZIP3K7PWpU /tmp/tmp.GjvbhZskIk + return 0 2026-01-20T18:36:51.081Z INFO setup Manager starting up {"gitCommit": "d97a62a5f5f2aebcfba009b60067168bf73bef83", "gitBranch": "PR-2207-d97a62a5", "buildTime": "", "goVersion": "go1.25.6", "os": "linux", "arch": "amd64"} + create_namespace data-at-rest-encryption-28610 + local namespace=data-at-rest-encryption-28610 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 
kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces data-at-rest-encryption-28610' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces data-at-rest-encryption-28610 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace data-at-rest-encryption-28610 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.rwJ7lzTAhH ++ mktemp + local LAST_OUT=/tmp/tmp.iyX1ocfwFY ++ mktemp + local LAST_ERR=/tmp/tmp.t0sjB1AVak + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.nFOkijlm66 + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace data-at-rest-encryption-28610 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rwJ7lzTAhH + cat /tmp/tmp.t0sjB1AVak + rm /tmp/tmp.rwJ7lzTAhH /tmp/tmp.t0sjB1AVak + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iyX1ocfwFY + cat /tmp/tmp.nFOkijlm66 + rm /tmp/tmp.iyX1ocfwFY /tmp/tmp.nFOkijlm66 + return 0 + kubectl_bin wait --for=delete namespace data-at-rest-encryption-28610 ++ mktemp + local LAST_OUT=/tmp/tmp.2SdJTICm8I ++ mktemp + local LAST_ERR=/tmp/tmp.BRu80IMnGt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace data-at-rest-encryption-28610 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2SdJTICm8I + cat /tmp/tmp.BRu80IMnGt + rm /tmp/tmp.2SdJTICm8I /tmp/tmp.BRu80IMnGt + return 0 + desc 'create namespace data-at-rest-encryption-28610' + set +o xtrace ----------------------------------------------------------------------------------- create namespace data-at-rest-encryption-28610 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
data-at-rest-encryption-28610 ++ mktemp + local LAST_OUT=/tmp/tmp.6U8i7HIlBn ++ mktemp + local LAST_ERR=/tmp/tmp.bpw769KL53 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace data-at-rest-encryption-28610 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6U8i7HIlBn namespace/data-at-rest-encryption-28610 created + cat /tmp/tmp.bpw769KL53 + rm /tmp/tmp.6U8i7HIlBn /tmp/tmp.bpw769KL53 + return 0 + set_kube_ctx data-at-rest-encryption-28610 + local namespace=data-at-rest-encryption-28610 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Zmo4p7rDig +++ mktemp ++ local LAST_ERR=/tmp/tmp.5K17DiJE5W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Zmo4p7rDig ++ cat /tmp/tmp.5K17DiJE5W ++ rm /tmp/tmp.Zmo4p7rDig /tmp/tmp.5K17DiJE5W ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2207-d97a62a5-8-cluster2 --namespace=data-at-rest-encryption-28610 ++ mktemp + local LAST_OUT=/tmp/tmp.QDOjATRFTj ++ mktemp + local LAST_ERR=/tmp/tmp.dhpITcpN8Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2207-d97a62a5-8-cluster2 --namespace=data-at-rest-encryption-28610 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QDOjATRFTj Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2207-d97a62a5-8-cluster2" modified. + cat /tmp/tmp.dhpITcpN8Y + rm /tmp/tmp.QDOjATRFTj /tmp/tmp.dhpITcpN8Y + return 0 + deploy_vault + local name=vault-service + [[ 0 -gt 0 ]] + desc 'install Vault vault-service' + set +o xtrace ----------------------------------------------------------------------------------- install Vault vault-service ----------------------------------------------------------------------------------- + helm uninstall vault-service Error: uninstall: Release not loaded: vault-service: release: not found + : + helm repo remove hashicorp "hashicorp" has been removed from your repositories + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" has been added to your repositories + destroy_vault vault-service + local name=vault-service + local vault_ns ++ helm list --all-namespaces --filter vault-service ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + vault_ns= + desc 'destroy vault' + set +o xtrace ----------------------------------------------------------------------------------- destroy vault ----------------------------------------------------------------------------------- ++ kubectl api-resources ++ grep vault ++ awk '{print $1}' + '[' -n '' ']' ++ kubectl get clusterrolebinding -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrolebinding vault-service-agent-injector-binding vault-service-server-binding clusterrolebinding.rbac.authorization.k8s.io "vault-service-agent-injector-binding" deleted clusterrolebinding.rbac.authorization.k8s.io "vault-service-server-binding" deleted ++ kubectl get clusterrole -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrole vault-service-agent-injector-clusterrole clusterrole.rbac.authorization.k8s.io "vault-service-agent-injector-clusterrole" deleted ++ 
kubectl get mutatingwebhookconfiguration -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete mutatingwebhookconfiguration vault-service-agent-injector-cfg mutatingwebhookconfiguration.admissionregistration.k8s.io "vault-service-agent-injector-cfg" deleted + [[ -n '' ]] + retry 10 60 helm install vault-service hashicorp/vault --disable-openapi-validation --set dataStorage.enabled=false + local max=10 + local delay=60 + shift 2 + local n=1 + helm install vault-service hashicorp/vault --disable-openapi-validation --set dataStorage.enabled=false NAME: vault-service LAST DEPLOYED: Tue Jan 20 18:37:33 2026 NAMESPACE: data-at-rest-encryption-28610 STATUS: deployed REVISION: 1 NOTES: Thank you for installing HashiCorp Vault! Now that you have deployed Vault, you should look over the docs on using Vault with Kubernetes available here: https://developer.hashicorp.com/vault/docs Your release is named vault-service. To learn more about the release, try: $ helm status vault-service $ helm get manifest vault-service + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running + sleep 1 + kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}' + grep Running Running + sleep 5 + kubectl_bin exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json ++ mktemp + local LAST_OUT=/tmp/tmp.gR3u4zcVWD ++ mktemp + local LAST_ERR=/tmp/tmp.2KdnCa0Q6N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gR3u4zcVWD + cat /tmp/tmp.2KdnCa0Q6N + rm /tmp/tmp.gR3u4zcVWD /tmp/tmp.2KdnCa0Q6N + return 0 + local unsealKey ++ jq -r '.unseal_keys_b64[]' + unsealKey=dJpWFyyCX3LnO8xtPa5U9sc+IS8c+ZPQPmb3B+q3p7s= + local token ++ jq -r .root_token + token=hvs.lgvYxc2Hbw8nXby8sv3HUAlM + kubectl_bin exec pod/vault-service-0 -- vault operator unseal dJpWFyyCX3LnO8xtPa5U9sc+IS8c+ZPQPmb3B+q3p7s= ++ mktemp + local LAST_OUT=/tmp/tmp.0JJ0GM65DT ++ mktemp + local LAST_ERR=/tmp/tmp.jH8WUQfpAG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec pod/vault-service-0 -- vault operator unseal dJpWFyyCX3LnO8xtPa5U9sc+IS8c+ZPQPmb3B+q3p7s= + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0JJ0GM65DT Key Value --- ----- Seal Type shamir Initialized true Sealed false Total Shares 1 Threshold 1 Version 1.21.2 Build Date 2026-01-06T08:33:05Z Storage Type file Cluster Name vault-cluster-ee417787 Cluster ID a00e1f04-e4a8-2c63-4b42-efa9b2b62141 HA Enabled false + cat /tmp/tmp.jH8WUQfpAG + rm /tmp/tmp.0JJ0GM65DT /tmp/tmp.jH8WUQfpAG + return 0 + kubectl_bin exec -it pod/vault-service-0 -- sh ++ mktemp + local 
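# ---------------------------------------------------------------------------
# The Vault bootstrap above is the usual single-share test pattern: init with
# one key share, unseal with it, and keep the root token. A condensed sketch
# (the unseal key and root token are per-run values, redacted-by-luck here):
init_json=$(kubectl exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json)
unsealKey=$(jq -r '.unseal_keys_b64[]' <<<"${init_json}")
token=$(jq -r '.root_token' <<<"${init_json}")
kubectl exec pod/vault-service-0 -- vault operator unseal "${unsealKey}"
# the in-pod `sh` session that follows logs in with the root token and enables
# the kv-v2 secrets engine (see "Enabled the kv-v2 secrets engine at: secret/"
# below); the token is then stored for the cluster to consume:
kubectl create secret generic vault-secret --from-literal=token="${token}"
# ---------------------------------------------------------------------------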
LAST_OUT=/tmp/tmp.ek727uxeBA ++ mktemp + local LAST_ERR=/tmp/tmp.ngjnRglhRH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec -it pod/vault-service-0 -- sh + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ek727uxeBA Success! You are now authenticated. The token information displayed below is already stored in the token helper. You do NOT need to run "vault login" again. Future Vault requests will automatically use this token. Key Value --- ----- token hvs.lgvYxc2Hbw8nXby8sv3HUAlM token_accessor LGGi9Yk43DxunOnzgCiBU2Dr token_duration ∞ token_renewable false token_policies ["root"] identity_policies [] policies ["root"] Success! Enabled the kv-v2 secrets engine at: secret/ + cat /tmp/tmp.ngjnRglhRH Unable to use a TTY - input is not a terminal or the right kind of file + rm /tmp/tmp.ek727uxeBA /tmp/tmp.ngjnRglhRH + return 0 + kubectl_bin create secret generic vault-secret --from-literal=token=hvs.lgvYxc2Hbw8nXby8sv3HUAlM ++ mktemp + local LAST_OUT=/tmp/tmp.YT5KfU6BQW ++ mktemp + local LAST_ERR=/tmp/tmp.eK2gHeJ0dI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create secret generic vault-secret --from-literal=token=hvs.lgvYxc2Hbw8nXby8sv3HUAlM + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YT5KfU6BQW secret/vault-secret created + cat /tmp/tmp.eK2gHeJ0dI + rm /tmp/tmp.YT5KfU6BQW /tmp/tmp.eK2gHeJ0dI + return 0 + deploy_minio + local cert_secret= + local service_name=minio-service + desc 'install MinIO: minio-service' + set +o xtrace ----------------------------------------------------------------------------------- install MinIO: minio-service ----------------------------------------------------------------------------------- + helm uninstall minio-service + : + helm repo remove minio "minio" has been removed from your repositories + helm repo add minio https://charts.min.io/ "minio" has been added to your repositories + local endpoint=http://minio-service:9000 + minio_args=('--version' '5.4.0' '--set' 'replicas=1' '--set' 'mode=standalone' '--set' 'resources.requests.memory=256Mi' '--set' 'rootUser=rootuser' '--set' 'rootPassword=rootpass123' '--set' 'users[0].accessKey=some-access-key' '--set' 'users[0].secretKey=some-secret-key' '--set' 'users[0].policy=consoleAdmin' '--set' 'service.type=ClusterIP' '--set' 'configPathmc=/tmp/' '--set' 'securityContext.enabled=false' '--set' 'persistence.size=2G' '--set' 'fullnameOverride=minio-service' '--set' 'serviceAccount.create=true' '--set' 'serviceAccount.name=minio-service-sa') + local minio_args + [[ -n '' ]] + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set 
service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G --set fullnameOverride=minio-service --set serviceAccount.create=true --set serviceAccount.name=minio-service-sa minio/minio NAME: minio-service LAST DEPLOYED: Tue Jan 20 18:38:08 2026 NAMESPACE: data-at-rest-encryption-28610 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.data-at-rest-encryption-28610.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace data-at-rest-encryption-28610 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace data-at-rest-encryption-28610 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace data-at-rest-encryption-28610 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace data-at-rest-encryption-28610 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. mc ls minio-service-local ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wMOFe4cF7l +++ mktemp ++ local LAST_ERR=/tmp/tmp.DDFkS1d71j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wMOFe4cF7l ++ cat /tmp/tmp.DDFkS1d71j ++ rm /tmp/tmp.wMOFe4cF7l /tmp/tmp.DDFkS1d71j ++ return 0 + local MINIO_POD=minio-service-549fcd79c5-skh4m + wait_pod minio-service-549fcd79c5-skh4m + local pod=minio-service-549fcd79c5-skh4m + set +o xtrace waiting for pod/minio-service-549fcd79c5-skh4m to be ready.OK + '[' -n psmdb-operator ']' + kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.data-at-rest-encryption-28610.svc.cluster.local --tcp=9000 service/minio-service created + create_minio_bucket operator-testing http://minio-service:9000 + local bucket=operator-testing + local endpoint=http://minio-service:9000 + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' ++ mktemp + local LAST_OUT=/tmp/tmp.DBqz7QDJfN ++ mktemp + local LAST_ERR=/tmp/tmp.oeFL2uHi4t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DBqz7QDJfN pod "aws-cli" deleted from data-at-rest-encryption-28610 namespace + cat /tmp/tmp.oeFL2uHi4t All commands 
and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.DBqz7QDJfN /tmp/tmp.oeFL2uHi4t + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ovq82daGfH ++ mktemp + local LAST_ERR=/tmp/tmp.sLxAHXBQE4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ovq82daGfH secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.sLxAHXBQE4 + rm /tmp/tmp.ovq82daGfH /tmp/tmp.sLxAHXBQE4 + return 0 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.J5wPVWSw8l ++ mktemp + local LAST_ERR=/tmp/tmp.cqwyovBV30 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J5wPVWSw8l secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.cqwyovBV30 + rm /tmp/tmp.J5wPVWSw8l /tmp/tmp.cqwyovBV30 + return 0 + cluster=some-name + desc 'create PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2207-d97a62a5"' + yq eval '(.spec | select(has("backup"))).backup.image = 
"perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/data-at-rest-encryption-28610/g + local LAST_OUT=/tmp/tmp.XYowDoucLs ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.BBqS8Q95n0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XYowDoucLs perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.BBqS8Q95n0 + rm /tmp/tmp.XYowDoucLs /tmp/tmp.BBqS8Q95n0 + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 false + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sprB9xBCHv +++ mktemp ++ local LAST_ERR=/tmp/tmp.ekMvSt8MIR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sprB9xBCHv ++ cat /tmp/tmp.ekMvSt8MIR ++ rm /tmp/tmp.sprB9xBCHv /tmp/tmp.ekMvSt8MIR ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PFkQ2xJ8fM +++ mktemp ++ local LAST_ERR=/tmp/tmp.CUgWoyq5Rm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PFkQ2xJ8fM ++ cat /tmp/tmp.CUgWoyq5Rm ++ rm /tmp/tmp.PFkQ2xJ8fM /tmp/tmp.CUgWoyq5Rm ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ObHf4VQXif +++ mktemp ++ local LAST_ERR=/tmp/tmp.ICkmAyUTAw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ObHf4VQXif ++ cat /tmp/tmp.ICkmAyUTAw ++ rm /tmp/tmp.ObHf4VQXif /tmp/tmp.ICkmAyUTAw ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local 
cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ooDqcEdpLq +++ mktemp ++ local LAST_ERR=/tmp/tmp.7vKh5p75xn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ooDqcEdpLq ++ cat /tmp/tmp.7vKh5p75xn ++ rm /tmp/tmp.ooDqcEdpLq /tmp/tmp.7vKh5p75xn ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UZOLoqIJ3N +++ mktemp ++ local LAST_ERR=/tmp/tmp.CBmTS9Ry3x ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UZOLoqIJ3N ++ cat /tmp/tmp.CBmTS9Ry3x ++ rm /tmp/tmp.UZOLoqIJ3N /tmp/tmp.CBmTS9Ry3x ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CNfhaBQEIo +++ mktemp ++ local LAST_ERR=/tmp/tmp.KqTVhaEu83 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CNfhaBQEIo ++ cat /tmp/tmp.KqTVhaEu83 ++ rm /tmp/tmp.CNfhaBQEIo /tmp/tmp.KqTVhaEu83 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rtPWYCwweB +++ mktemp ++ local LAST_ERR=/tmp/tmp.eawWoQues4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rtPWYCwweB ++ cat /tmp/tmp.eawWoQues4 ++ rm /tmp/tmp.rtPWYCwweB /tmp/tmp.eawWoQues4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local 
pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TYJ70TdHkw +++ mktemp ++ local LAST_ERR=/tmp/tmp.frrBDl9WF0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TYJ70TdHkw ++ cat /tmp/tmp.frrBDl9WF0 ++ rm /tmp/tmp.TYJ70TdHkw /tmp/tmp.frrBDl9WF0 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JzpluOVHAQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.9A9z9Yv6TR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JzpluOVHAQ ++ cat /tmp/tmp.9A9z9Yv6TR ++ rm /tmp/tmp.JzpluOVHAQ /tmp/tmp.9A9z9Yv6TR ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness...... + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.bpXt9EUguu/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("data-at-rest-encryption-28610", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.f29prQHaah ++ mktemp + local LAST_ERR=/tmp/tmp.hdqWY8x5X1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f29prQHaah + cat /tmp/tmp.hdqWY8x5X1 + rm /tmp/tmp.f29prQHaah /tmp/tmp.hdqWY8x5X1 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.bpXt9EUguu/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.bpXt9EUguu/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.bpXt9EUguu/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0.yml /tmp/tmp.bpXt9EUguu/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2026-01-20T18:41:13+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.bpXt9EUguu/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. 
| select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("data-at-rest-encryption-28610", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.SYdrA2rBe4 ++ mktemp + local LAST_ERR=/tmp/tmp.4FjXvXsUfa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SYdrA2rBe4 + cat /tmp/tmp.4FjXvXsUfa + rm /tmp/tmp.SYdrA2rBe4 /tmp/tmp.4FjXvXsUfa + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.bpXt9EUguu/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.bpXt9EUguu/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.bpXt9EUguu/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-cfg.yml /tmp/tmp.bpXt9EUguu/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2026-01-20T18:41:15+0000] compare_kubectl: statefulset/some-name-cfg OK + desc 'create user' + set +o xtrace ----------------------------------------------------------------------------------- create user ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.data-at-rest-encryption-28610 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' 
+ suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MDfZLDLHOp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gg4VXOBDrb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MDfZLDLHOp ++ cat /tmp/tmp.Gg4VXOBDrb ++ rm /tmp/tmp.MDfZLDLHOp /tmp/tmp.Gg4VXOBDrb ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.E58BfigL6n ++ mktemp + local LAST_ERR=/tmp/tmp.xTyyxd8441 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E58BfigL6n Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("385d526a-70b5-487d-ab43-330a839aa18c") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.xTyyxd8441 + rm /tmp/tmp.E58BfigL6n /tmp/tmp.xTyyxd8441 + return 0 + sleep 2 + desc 'write data, read it' + set +o xtrace ----------------------------------------------------------------------------------- write data, read it ----------------------------------------------------------------------------------- + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OKUcBucg7B +++ mktemp ++ local LAST_ERR=/tmp/tmp.RB6b9SWEwp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OKUcBucg7B ++ cat /tmp/tmp.RB6b9SWEwp ++ rm /tmp/tmp.OKUcBucg7B /tmp/tmp.RB6b9SWEwp ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.4aFV1JrYbt ++ mktemp + local LAST_ERR=/tmp/tmp.8TbRS2tCh8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4aFV1JrYbt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("81cef2fa-805c-4d1e-80cf-c20256474cf6") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.8TbRS2tCh8 + rm /tmp/tmp.4aFV1JrYbt /tmp/tmp.8TbRS2tCh8 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local command=find + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-01-20T18:41:22+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XTyL8Sf1WR +++ mktemp ++ local LAST_ERR=/tmp/tmp.zdpiy6wyNA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XTyL8Sf1WR ++ cat /tmp/tmp.zdpiy6wyNA ++ rm /tmp/tmp.XTyL8Sf1WR /tmp/tmp.zdpiy6wyNA ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.UfUCeuXNnJ ++ mktemp + local LAST_ERR=/tmp/tmp.FGgfOmVqZR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UfUCeuXNnJ + cat /tmp/tmp.FGgfOmVqZR + rm /tmp/tmp.UfUCeuXNnJ /tmp/tmp.FGgfOmVqZR + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/find.json /tmp/tmp.bpXt9EUguu/find + desc 'run backups' + set +o xtrace ----------------------------------------------------------------------------------- run backups ----------------------------------------------------------------------------------- + backup_name_minio=backup-minio + backup_name_gcp=backup-gcp-cs + desc 'run minio backup backup-minio' + set +o xtrace ----------------------------------------------------------------------------------- run minio backup backup-minio ----------------------------------------------------------------------------------- + run_backup minio + local storage=minio + local backup_name=backup-minio + local type=logical + log 'running backup backup-minio' + set +o xtrace [2026-01-20T18:41:24+0000] running backup backup-minio + yq eval '.metadata.name = "backup-minio" | .spec.storageName = "minio" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/backup-minio.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.g8csZuNT5w ++ mktemp + local LAST_ERR=/tmp/tmp.80gtSRTQVc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g8csZuNT5w perconaservermongodbbackup.psmdb.percona.com/backup-minio created + cat /tmp/tmp.80gtSRTQVc + rm /tmp/tmp.g8csZuNT5w /tmp/tmp.80gtSRTQVc + return 0 + wait_backup backup-minio + local backup_name=backup-minio + local target_state=ready + set +o xtrace waiting for backup-minio to reach ready state..................OK + '[' -z '' ']' + desc 'run minio backup backup-gcp-cs' + set +o xtrace ----------------------------------------------------------------------------------- run minio backup backup-gcp-cs ----------------------------------------------------------------------------------- + run_backup gcp-cs + local storage=gcp-cs + local backup_name=backup-gcp-cs + local type=logical + log 'running backup backup-gcp-cs' + set +o xtrace [2026-01-20T18:41:58+0000] running backup backup-gcp-cs + yq eval '.metadata.name = "backup-gcp-cs" | .spec.storageName = "gcp-cs" | .spec.type = "logical"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/backup-gcp-cs.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.lNawVd2VZ9 ++ mktemp + local LAST_ERR=/tmp/tmp.4L6AAb2y82 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lNawVd2VZ9 perconaservermongodbbackup.psmdb.percona.com/backup-gcp-cs created + cat /tmp/tmp.4L6AAb2y82 + rm /tmp/tmp.lNawVd2VZ9 /tmp/tmp.4L6AAb2y82 + return 0 + wait_backup backup-gcp-cs + local backup_name=backup-gcp-cs + local target_state=ready + set +o xtrace waiting for backup-gcp-cs to reach ready state.................OK + sleep 5 + desc 'check backup and restore -- minio' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- minio 
----------------------------------------------------------------------------------- + check_backup_in_storage backup-minio minio rs0 myApp.test.gz + local backup=backup-minio + local storage_type=minio + local replset=rs0 + local file=myApp.test.gz + local protocol=http + local endpoint ++ get_backup_dest backup-minio ++ local backup_name=backup-minio ++ /usr/sbin/sed 's|https://engk8soperators.blob.core.windows.net/||' ++ kubectl_bin get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.cEy5x3lehz +++ mktemp ++ local LAST_ERR=/tmp/tmp.Oh49CLXArL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-minio -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cEy5x3lehz ++ cat /tmp/tmp.Oh49CLXArL ++ rm /tmp/tmp.cEy5x3lehz /tmp/tmp.Oh49CLXArL ++ return 0 + backup_dest=operator-testing/2026-01-20T18:41:26Z + case ${storage_type} in + endpoint=minio-service + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2026-01-20T18:41:26Z/rs0/myApp.test.gz + grep myApp.test.gz ++ mktemp + local LAST_OUT=/tmp/tmp.g63VAOyhOr ++ mktemp + local LAST_ERR=/tmp/tmp.euhs3Ut3Xx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2026-01-20T18:41:26Z/rs0/myApp.test.gz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g63VAOyhOr + cat /tmp/tmp.euhs3Ut3Xx All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. + rm /tmp/tmp.g63VAOyhOr /tmp/tmp.euhs3Ut3Xx + return 0 + sleep 1 + let retry+=1 + '[' 17 -ge 60 ']' + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2026-01-20T18:41:26Z/rs0/myApp.test.gz + grep myApp.test.gz ++ mktemp + local LAST_OUT=/tmp/tmp.5XczsnEY7n ++ mktemp + local LAST_ERR=/tmp/tmp.7faTL0cScp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --no-verify-ssl --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/2026-01-20T18:41:26Z/rs0/myApp.test.gz + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5XczsnEY7n + cat /tmp/tmp.7faTL0cScp All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. 
If you don't see a command prompt, try pressing enter. warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: Internal error occurred: error attaching to container: container is in CONTAINER_EXITED state + rm /tmp/tmp.5XczsnEY7n /tmp/tmp.7faTL0cScp + return 0 2026-01-20 18:41:31 55 myApp.test.gz + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x7vIHekZP0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7yPJAipnpu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x7vIHekZP0 ++ cat /tmp/tmp.7yPJAipnpu ++ rm /tmp/tmp.x7vIHekZP0 /tmp/tmp.7yPJAipnpu ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.JAplDCwTfM ++ mktemp + local LAST_ERR=/tmp/tmp.ERn2koHRxo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JAplDCwTfM Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("a22fad6b-b798-4bde-8513-5bb951298532") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ERn2koHRxo + rm /tmp/tmp.JAplDCwTfM /tmp/tmp.ERn2koHRxo + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-01-20T18:42:56+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history 
file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KzTpjicEHb +++ mktemp ++ local LAST_ERR=/tmp/tmp.WVxrlAIsEd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KzTpjicEHb ++ cat /tmp/tmp.WVxrlAIsEd ++ rm /tmp/tmp.KzTpjicEHb /tmp/tmp.WVxrlAIsEd ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.c3Ll5zf6xz ++ mktemp + local LAST_ERR=/tmp/tmp.0kiJEpAkIw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c3Ll5zf6xz + cat /tmp/tmp.0kiJEpAkIw + rm /tmp/tmp.c3Ll5zf6xz /tmp/tmp.0kiJEpAkIw + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/find-2nd.json /tmp/tmp.bpXt9EUguu/find-2nd + run_restore backup-minio + local backup_name=backup-minio + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/restore.yml + log 'running restore restore-backup-minio' + set +o xtrace [2026-01-20T18:42:59+0000] running restore restore-backup-minio + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-minio/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-minio/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.kNrY4sMuu9 ++ mktemp + local LAST_ERR=/tmp/tmp.wE6W7m27yn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kNrY4sMuu9 perconaservermongodbrestore.psmdb.percona.com/restore-backup-minio created + cat /tmp/tmp.wE6W7m27yn + rm /tmp/tmp.kNrY4sMuu9 /tmp/tmp.wE6W7m27yn + return 0 + wait_restore backup-minio some-name + local backup_name=backup-minio + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-minio object to be created.OK Waiting psmdb-restore/restore-backup-minio to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' 
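run_restore, a few steps above, turns the generic conf/restore.yml into a named PerconaServerMongoDBRestore by sed-substituting the name: and backupName: keys before piping the result to kubectl apply. Since a bare sed on 'name:' can match unintended keys in a larger manifest, the same stamping can be done structurally with yq; a sketch under that assumption, reusing the test's own template path:

  # Sketch only: fill the restore template with yq instead of sed.
  # backup_name is the only variable part.
  backup_name=backup-minio
  yq eval '.metadata.name = "restore-'"$backup_name"'"
      | .spec.backupName = "'"$backup_name"'"' conf/restore.yml \
    | kubectl apply -f -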
+++ mktemp ++ local LAST_OUT=/tmp/tmp.K0Phudybkj +++ mktemp ++ local LAST_ERR=/tmp/tmp.3HoEtzY65w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K0Phudybkj ++ cat /tmp/tmp.3HoEtzY65w ++ rm /tmp/tmp.K0Phudybkj /tmp/tmp.3HoEtzY65w ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DYX1OMcMrP +++ mktemp ++ local LAST_ERR=/tmp/tmp.zuh3wOdIBE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DYX1OMcMrP ++ cat /tmp/tmp.zuh3wOdIBE ++ rm /tmp/tmp.DYX1OMcMrP /tmp/tmp.zuh3wOdIBE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NKoVErdgnE +++ mktemp ++ local LAST_ERR=/tmp/tmp.RNCHYSeWhE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NKoVErdgnE ++ cat /tmp/tmp.RNCHYSeWhE ++ rm /tmp/tmp.NKoVErdgnE /tmp/tmp.RNCHYSeWhE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8HZqyfPgKc +++ mktemp ++ local LAST_ERR=/tmp/tmp.A57VaUvcCC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8HZqyfPgKc ++ cat /tmp/tmp.A57VaUvcCC ++ rm /tmp/tmp.8HZqyfPgKc /tmp/tmp.A57VaUvcCC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uWcfWJVHF0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.x9w38AgOAo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uWcfWJVHF0 ++ cat /tmp/tmp.x9w38AgOAo ++ rm /tmp/tmp.uWcfWJVHF0 /tmp/tmp.x9w38AgOAo ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
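wait_cluster_consistency, traced above, is a plain polling loop: read .status.state from the psmdb object, print a dot, sleep 10 seconds, and give up after 32 attempts. Stripped of the kubectl_bin retry plumbing, the pattern reduces to a few lines:

  # Sketch only: poll the custom resource until its state is "ready",
  # with the same 32-attempt cap the test uses.
  retry=0
  until [[ $(kubectl get psmdb some-name -o jsonpath='{.status.state}') == "ready" ]]; do
    if (( ++retry >= 32 )); then
      echo "cluster never became ready" >&2
      exit 1
    fi
    echo -n .
    sleep 10
  done
  echo .OK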
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vo1Dpzuxja +++ mktemp ++ local LAST_ERR=/tmp/tmp.90pCx5HLxX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Vo1Dpzuxja ++ cat /tmp/tmp.90pCx5HLxX ++ rm /tmp/tmp.Vo1Dpzuxja /tmp/tmp.90pCx5HLxX ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongos_cmd find myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local command=find + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-01-20T18:44:37+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iVzpith4Qg +++ mktemp ++ local LAST_ERR=/tmp/tmp.eR966bfnUK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iVzpith4Qg ++ cat /tmp/tmp.eR966bfnUK ++ rm /tmp/tmp.iVzpith4Qg /tmp/tmp.eR966bfnUK ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.uQYS7IzheI ++ mktemp + local LAST_ERR=/tmp/tmp.CU5cUmCnB2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uQYS7IzheI + cat /tmp/tmp.CU5cUmCnB2 + rm /tmp/tmp.uQYS7IzheI /tmp/tmp.CU5cUmCnB2 + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/find.json /tmp/tmp.bpXt9EUguu/find + '[' -z '' ']' + desc 'check backup and restore -- gcp-cs' + set +o xtrace ----------------------------------------------------------------------------------- check backup and restore -- gcp-cs 
----------------------------------------------------------------------------------- ++ get_backup_dest backup-gcp-cs ++ local backup_name=backup-gcp-cs ++ kubectl_bin get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ sed -e 's/.json$//' ++ sed 's|s3://||' +++ mktemp ++ sed 's|azure://||' ++ sed 's|gs://||' ++ local LAST_OUT=/tmp/tmp.uzmligjhTe +++ mktemp ++ local LAST_ERR=/tmp/tmp.lShkYELawf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup backup-gcp-cs -o 'jsonpath={.status.destination}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uzmligjhTe ++ cat /tmp/tmp.lShkYELawf ++ rm /tmp/tmp.uzmligjhTe /tmp/tmp.lShkYELawf ++ return 0 + backup_dest_gcp=operator-testing/psmdb/2026-01-20T18:42:01Z + setup_gcs_credentials + local secret_name=gcp-cs-secret + gsutil ls + echo 'Setting up GCS credentials from K8s secret: gcp-cs-secret' Setting up GCS credentials from K8s secret: gcp-cs-secret + local trace_was_on=0 + [[ ehxB == *x* ]] + trace_was_on=1 + set +x + echo 'GCS credentials configured successfully' GCS credentials configured successfully + check_backup_existence_gcs operator-testing/psmdb/2026-01-20T18:42:01Z /rs0/myApp.test.gz + backup_dest_gcp=operator-testing/psmdb/2026-01-20T18:42:01Z + obj=/rs0/myApp.test.gz + storage_name=gcp-cs + retry=0 + gcs_path=gs://operator-testing/psmdb/2026-01-20T18:42:01Z/rs0/myApp.test.gz + gsutil ls gs://operator-testing/psmdb/2026-01-20T18:42:01Z/rs0/myApp.test.gz + echo 'Backup gs://operator-testing/psmdb/2026-01-20T18:42:01Z/rs0/myApp.test.gz found in gcp-cs' Backup gs://operator-testing/psmdb/2026-01-20T18:42:01Z/rs0/myApp.test.gz found in gcp-cs + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FmgumEJfOe +++ mktemp ++ local LAST_ERR=/tmp/tmp.wODyo4jk1G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FmgumEJfOe ++ cat /tmp/tmp.wODyo4jk1G ++ rm /tmp/tmp.FmgumEJfOe /tmp/tmp.wODyo4jk1G ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.TmGNEju2WX ++ mktemp + local LAST_ERR=/tmp/tmp.CUP5AvEdh0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TmGNEju2WX Percona Server for MongoDB 
shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("1a00e91e-272a-4f43-a5c8-d51633949366") } Percona Server for MongoDB server version: v8.0.17-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.CUP5AvEdh0 + rm /tmp/tmp.TmGNEju2WX /tmp/tmp.CUP5AvEdh0 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-01-20T18:44:46+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 mongodb '' '' 27017 + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5On6s2Xx8I +++ mktemp ++ local LAST_ERR=/tmp/tmp.iTzVdodeuu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5On6s2Xx8I ++ cat /tmp/tmp.iTzVdodeuu ++ rm /tmp/tmp.5On6s2Xx8I /tmp/tmp.iTzVdodeuu ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.VC40B8zvux ++ mktemp + local LAST_ERR=/tmp/tmp.KaKClg2hwV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VC40B8zvux + cat /tmp/tmp.KaKClg2hwV + rm /tmp/tmp.VC40B8zvux /tmp/tmp.KaKClg2hwV + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/find-2nd.json /tmp/tmp.bpXt9EUguu/find-2nd + run_restore backup-gcp-cs + local backup_name=backup-gcp-cs + local restore_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/restore.yml + log 'running restore restore-backup-gcp-cs' + set +o xtrace 
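The two storage backends are verified differently: gsutil runs straight from the CI runner (after setup_gcs_credentials briefly turns off xtrace with set +x so the key material never lands in this log), while the in-cluster minio endpoint is only resolvable from inside Kubernetes, so the test spawns a throwaway aws-cli pod to list the object. A condensed sketch of both checks; backup_dest_minio is an illustrative name here, while in the trace above the minio destination is operator-testing/2026-01-20T18:41:26Z and both values come from each backup's .status.destination:

  # Sketch only: confirm the dumped collection exists in each backend.
  gsutil ls "gs://${backup_dest_gcp}/rs0/myApp.test.gz"
  # minio-service:9000 is cluster-internal, hence the helper pod:
  kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
    /usr/bin/aws --endpoint-url http://minio-service:9000 \
    s3 ls "s3://${backup_dest_minio}/rs0/myApp.test.gz"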
[2026-01-20T18:44:48+0000] running restore restore-backup-gcp-cs + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/restore.yml + /usr/sbin/sed -e 's/name:/name: restore-backup-gcp-cs/' + /usr/sbin/sed -e 's/backupName:/backupName: backup-gcp-cs/' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.k1JxuHWeAt ++ mktemp + local LAST_ERR=/tmp/tmp.mOjClDw6LO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k1JxuHWeAt perconaservermongodbrestore.psmdb.percona.com/restore-backup-gcp-cs created + cat /tmp/tmp.mOjClDw6LO + rm /tmp/tmp.k1JxuHWeAt /tmp/tmp.mOjClDw6LO + return 0 + wait_restore backup-gcp-cs some-name + local backup_name=backup-gcp-cs + local cluster_name=some-name + local target_state=ready + local wait_cluster_consistency=1 + local wait_time=1780 + local ok_if_ready=0 + set +o xtrace Waiting for the psmdb-restore/restore-backup-gcp-cs object to be created.OK Waiting psmdb-restore/restore-backup-gcp-cs to reach state "ready" .OK after 0 minutes + [[ 1 -eq 1 ]] + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rmtE0VbxGd +++ mktemp ++ local LAST_ERR=/tmp/tmp.nF5EZj5eGC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rmtE0VbxGd ++ cat /tmp/tmp.nF5EZj5eGC ++ rm /tmp/tmp.rmtE0VbxGd /tmp/tmp.nF5EZj5eGC ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wynqNVKspn +++ mktemp ++ local LAST_ERR=/tmp/tmp.dFQnREGwTJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wynqNVKspn ++ cat /tmp/tmp.dFQnREGwTJ ++ rm /tmp/tmp.wynqNVKspn /tmp/tmp.dFQnREGwTJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bTpjimod6X +++ mktemp ++ local LAST_ERR=/tmp/tmp.SalFCGwq6M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bTpjimod6X ++ cat /tmp/tmp.SalFCGwq6M ++ rm /tmp/tmp.bTpjimod6X /tmp/tmp.SalFCGwq6M ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . 
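The same dot-printing loop now runs again for the gcp-cs restore. On recent kubectl releases (the client in this run is 1.35) the whole loop could plausibly be collapsed into a single wait call; a sketch, not what the test script actually does:

  # Sketch only: jsonpath-based waiting is built into newer kubectl.
  kubectl wait psmdb/some-name \
    --for=jsonpath='{.status.state}'=ready --timeout=320s

One reason the test keeps its own loop is the per-attempt retry and output capture the kubectl_bin wrapper provides.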
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7eDQ7TeUWJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.KIukVXpzyH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7eDQ7TeUWJ ++ cat /tmp/tmp.KIukVXpzyH ++ rm /tmp/tmp.7eDQ7TeUWJ /tmp/tmp.KIukVXpzyH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zG7EwDUcSv +++ mktemp ++ local LAST_ERR=/tmp/tmp.2JnLyQZqY5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zG7EwDUcSv ++ cat /tmp/tmp.2JnLyQZqY5 ++ rm /tmp/tmp.zG7EwDUcSv /tmp/tmp.2JnLyQZqY5 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_mongos_cmd find myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local command=find + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-01-20T18:46:12+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.data-at-rest-encryption-28610 + local driver=mongodb + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gq7sgdM9CY +++ mktemp ++ local LAST_ERR=/tmp/tmp.1opSVJ1FwN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gq7sgdM9CY ++ cat /tmp/tmp.1opSVJ1FwN ++ rm /tmp/tmp.Gq7sgdM9CY /tmp/tmp.1opSVJ1FwN ++ return 0 + local client_container=psmdb-client-696897d69b-vc2p7 + kubectl_bin exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5KhrTADp93 ++ mktemp + local LAST_ERR=/tmp/tmp.E4TayT8Hkm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-vc2p7 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-mongos.data-at-rest-encryption-28610.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5KhrTADp93 + cat /tmp/tmp.E4TayT8Hkm + rm /tmp/tmp.5KhrTADp93 /tmp/tmp.E4TayT8Hkm + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/compare/find.json /tmp/tmp.bpXt9EUguu/find + desc 'check logs that cluster encrypted' + set +o xtrace ----------------------------------------------------------------------------------- check logs that cluster encrypted ----------------------------------------------------------------------------------- + kubectl_bin logs some-name-rs0-0 -c mongod + grep -i 'Encryption keys DB is initialized successfully' ++ mktemp + local LAST_OUT=/tmp/tmp.U36rnjfcM1 ++ mktemp + local LAST_ERR=/tmp/tmp.s5zpnlsEiG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs some-name-rs0-0 -c mongod + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U36rnjfcM1 + cat /tmp/tmp.s5zpnlsEiG + rm /tmp/tmp.U36rnjfcM1 /tmp/tmp.s5zpnlsEiG + return 0 {"t":{"$date":"2026-01-20T18:39:06.444+00:00"},"s":"I", "c":"STORAGE", "id":29039, "svc":"S", "ctx":"initandlisten","msg":"Encryption keys DB is initialized successfully"} + log 'psmdb/some-name is properly encrypted: OK' + set +o xtrace [2026-01-20T18:46:16+0000] psmdb/some-name is properly encrypted: OK + desc 'check logs for unencrypted cluster' + set +o xtrace ----------------------------------------------------------------------------------- check logs for unencrypted cluster ----------------------------------------------------------------------------------- + log 'disabling encryption (this will cause pods to crash because data is already encrypted)' + set +o xtrace [2026-01-20T18:46:16+0000] disabling encryption (this will cause pods to crash because data is already encrypted) + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/some-name-unencrypted.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/some-name-unencrypted.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/e2e-tests/data-at-rest-encryption/conf/some-name-unencrypted.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2207-d97a62a5"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/data-at-rest-encryption-28610/g + local LAST_OUT=/tmp/tmp.a4fp1sFkac ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.1HIg9QnlyI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a4fp1sFkac perconaservermongodb.psmdb.percona.com/some-name configured + cat /tmp/tmp.1HIg9QnlyI + rm /tmp/tmp.a4fp1sFkac /tmp/tmp.1HIg9QnlyI + return 0 + log 'waiting for 30 seconds' + set +o xtrace [2026-01-20T18:46:20+0000] waiting for 30 seconds + sleep 30 + log 'checking if encryption is disabled' + set +o xtrace 
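Encryption is asserted purely from mongod's structured log: the step above greps a data pod for the 'Encryption keys DB is initialized successfully' message (log id 29039) rather than inspecting files on disk. A generalized sketch of that check, extended across all data-bearing pods:

  # Sketch only: every mongod pod should log the encryption marker.
  for pod in $(kubectl get pod -l app.kubernetes.io/component=mongod -o name); do
    kubectl logs "$pod" -c mongod \
      | grep -q 'Encryption keys DB is initialized successfully' \
      || { echo "$pod is not encrypted" >&2; exit 1; }
  done

The negative test that follows deliberately reapplies the cluster without encryption settings and expects pods to crash, since the existing data files are already encrypted.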
[2026-01-20T18:46:50+0000] checking if encryption is disabled + retry=0 + encryption_disabled=0 + true ++ kubectl_bin get pod -l app.kubernetes.io/component=cfg --no-headers ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mFxqs6xOCi +++ mktemp ++ local LAST_ERR=/tmp/tmp.JmgwlqjMqf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -l app.kubernetes.io/component=cfg --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mFxqs6xOCi ++ cat /tmp/tmp.JmgwlqjMqf ++ rm /tmp/tmp.mFxqs6xOCi /tmp/tmp.JmgwlqjMqf ++ return 0 + for pod in $(kubectl_bin get pod -l app.kubernetes.io/component=cfg --no-headers | awk '{print $1}') + log 'checking pod/some-name-cfg-0 logs' + set +o xtrace [2026-01-20T18:46:51+0000] checking pod/some-name-cfg-0 logs + kubectl_bin logs some-name-cfg-0 -c mongod + grep -i 'This may be due to the database files being encrypted, being from an older version or due to corruption on disk' ++ mktemp + local LAST_OUT=/tmp/tmp.ZXG0qUyYsk ++ mktemp + local LAST_ERR=/tmp/tmp.DYQ4WH590e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs some-name-cfg-0 -c mongod + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZXG0qUyYsk + cat /tmp/tmp.DYQ4WH590e + rm /tmp/tmp.ZXG0qUyYsk /tmp/tmp.DYQ4WH590e + return 0 + for pod in $(kubectl_bin get pod -l app.kubernetes.io/component=cfg --no-headers | awk '{print $1}') + log 'checking pod/some-name-cfg-1 logs' + set +o xtrace [2026-01-20T18:46:53+0000] checking pod/some-name-cfg-1 logs + kubectl_bin logs some-name-cfg-1 -c mongod + grep -i 'This may be due to the database files being encrypted, being from an older version or due to corruption on disk' ++ mktemp + local LAST_OUT=/tmp/tmp.xcUYFzHPha ++ mktemp + local LAST_ERR=/tmp/tmp.618JsN0q20 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs some-name-cfg-1 -c mongod + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xcUYFzHPha + cat /tmp/tmp.618JsN0q20 + rm /tmp/tmp.xcUYFzHPha /tmp/tmp.618JsN0q20 + return 0 + for pod in $(kubectl_bin get pod -l app.kubernetes.io/component=cfg --no-headers | awk '{print $1}') + log 'checking pod/some-name-cfg-2 logs' + set +o xtrace [2026-01-20T18:46:55+0000] checking pod/some-name-cfg-2 logs + kubectl_bin logs some-name-cfg-2 -c mongod + grep -i 'This may be due to the database files being encrypted, being from an older version or due to corruption on disk' ++ mktemp + local LAST_OUT=/tmp/tmp.DuzeiAWDrM ++ mktemp + local LAST_ERR=/tmp/tmp.8cz8lq3HUl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs some-name-cfg-2 -c mongod + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DuzeiAWDrM + cat /tmp/tmp.8cz8lq3HUl + rm /tmp/tmp.DuzeiAWDrM /tmp/tmp.8cz8lq3HUl + return 0 [1768934815:143618][1:0x7b1ef2dc4200], file:WiredTiger.wt, connection: [WT_VERB_DEFAULT][ERROR]: __wti_btree_tree_open, 665: This may be due to the database files being encrypted, being from an older version or due to corruption on disk: WT_PANIC: WiredTiger library panic [1768934815:147093][1:0x7b1ef2dc4200], file:WiredTiger.wt, connection: [WT_VERB_DEFAULT][ERROR]: __wti_btree_tree_open, 665: This may be due to the database files being encrypted, being from an older version or due to corruption on disk: WT_PANIC: WiredTiger library panic [1768934815:150525][1:0x7b1ef2dc4200], file:WiredTiger.wt, 
connection: [WT_VERB_DEFAULT][ERROR]: __wti_btree_tree_open, 665: This may be due to the database files being encrypted, being from an older version or due to corruption on disk: WT_PANIC: WiredTiger library panic + encryption_disabled=1 ++ kubectl_bin get pod -l app.kubernetes.io/component=mongod --no-headers ++ awk '{print $1}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AK08DooCLC +++ mktemp ++ local LAST_ERR=/tmp/tmp.dKyqxKgvLU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pod -l app.kubernetes.io/component=mongod --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AK08DooCLC ++ cat /tmp/tmp.dKyqxKgvLU ++ rm /tmp/tmp.AK08DooCLC /tmp/tmp.dKyqxKgvLU ++ return 0 + for pod in $(kubectl_bin get pod -l app.kubernetes.io/component=mongod --no-headers | awk '{print $1}') + log 'checking pod/some-name-rs0-0 logs' + set +o xtrace [2026-01-20T18:46:57+0000] checking pod/some-name-rs0-0 logs + kubectl_bin logs some-name-rs0-0 -c mongod + grep -i 'This may be due to the database files being encrypted, being from an older version or due to corruption on disk' ++ mktemp + local LAST_OUT=/tmp/tmp.vdrQEzVzhc ++ mktemp + local LAST_ERR=/tmp/tmp.mt2fBelPQY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs some-name-rs0-0 -c mongod + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vdrQEzVzhc + cat /tmp/tmp.mt2fBelPQY + rm /tmp/tmp.vdrQEzVzhc /tmp/tmp.mt2fBelPQY + return 0 + for pod in $(kubectl_bin get pod -l app.kubernetes.io/component=mongod --no-headers | awk '{print $1}') + log 'checking pod/some-name-rs0-1 logs' + set +o xtrace [2026-01-20T18:46:58+0000] checking pod/some-name-rs0-1 logs + kubectl_bin logs some-name-rs0-1 -c mongod + grep -i 'This may be due to the database files being encrypted, being from an older version or due to corruption on disk' ++ mktemp + local LAST_OUT=/tmp/tmp.XfSuTnXzAb ++ mktemp + local LAST_ERR=/tmp/tmp.i4S9RCxROp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs some-name-rs0-1 -c mongod + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XfSuTnXzAb + cat /tmp/tmp.i4S9RCxROp + rm /tmp/tmp.XfSuTnXzAb /tmp/tmp.i4S9RCxROp + return 0 + for pod in $(kubectl_bin get pod -l app.kubernetes.io/component=mongod --no-headers | awk '{print $1}') + log 'checking pod/some-name-rs0-2 logs' + set +o xtrace [2026-01-20T18:47:00+0000] checking pod/some-name-rs0-2 logs + kubectl_bin logs some-name-rs0-2 -c mongod + grep -i 'This may be due to the database files being encrypted, being from an older version or due to corruption on disk' ++ mktemp + local LAST_OUT=/tmp/tmp.KbIsEPlNOR ++ mktemp + local LAST_ERR=/tmp/tmp.jL90fmItCc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs some-name-rs0-2 -c mongod + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KbIsEPlNOR + cat /tmp/tmp.jL90fmItCc + rm /tmp/tmp.KbIsEPlNOR /tmp/tmp.jL90fmItCc + return 0 + [[ 1 == 1 ]] + log 'psmdb/some-name encryption is disabled: OK' + set +o xtrace [2026-01-20T18:47:01+0000] psmdb/some-name encryption is disabled: OK + break + destroy_vault + local name=vault-service + local vault_ns ++ helm list --all-namespaces --filter vault-service ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + vault_ns=data-at-rest-encryption-28610 + desc 'destroy vault' + set +o xtrace 
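The expected-failure check just completed mirrors the positive one: it walks every cfg and mongod pod and greps for the WiredTiger panic ('This may be due to the database files being encrypted...'), flipping encryption_disabled=1 as soon as any pod shows it. A generalized sketch, assuming the same component labels the test selects on:

  # Sketch only: after removing encryption settings, at least one pod
  # must panic on its still-encrypted files; finding the message anywhere
  # is enough to pass this negative test.
  encryption_disabled=0
  for component in cfg mongod; do
    for pod in $(kubectl get pod -l app.kubernetes.io/component=$component -o name); do
      if kubectl logs "$pod" -c mongod 2>/dev/null \
          | grep -qi 'may be due to the database files being encrypted'; then
        encryption_disabled=1
      fi
    done
  done
  [[ $encryption_disabled -eq 1 ]] || { echo 'encryption still enabled?' >&2; exit 1; }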
----------------------------------------------------------------------------------- destroy vault ----------------------------------------------------------------------------------- ++ kubectl api-resources ++ grep vault ++ awk '{print $1}' + '[' -n data-at-rest-encryption-28610 ']' + helm uninstall vault-service --namespace data-at-rest-encryption-28610 release "vault-service" uninstalled ++ kubectl get clusterrolebinding -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : ++ kubectl get mutatingwebhookconfiguration -l app.kubernetes.io/instance=vault-service -o 'jsonpath={range .items[*]}{.metadata.name}{" "}{end}' + timeout 30 kubectl delete mutatingwebhookconfiguration error: resource(s) were provided, but no name was specified + : + destroy data-at-rest-encryption-28610 + local namespace=data-at-rest-encryption-28610 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.wjwOV0oQwG +++ mktemp ++ local LAST_ERR=/tmp/tmp.OqCdtonp0O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wjwOV0oQwG ++ cat /tmp/tmp.OqCdtonp0O ++ rm /tmp/tmp.wjwOV0oQwG /tmp/tmp.OqCdtonp0O ++ return 0 + '[' 2 '!=' 0 ']' + kubectl_bin get psmdb-backup ++ mktemp + local LAST_OUT=/tmp/tmp.ViXFKqMTgn ++ mktemp + local LAST_ERR=/tmp/tmp.FayIT7aR3C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get psmdb-backup + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ViXFKqMTgn NAME CLUSTER STORAGE DESTINATION TYPE SIZE STATUS COMPLETED AGE backup-gcp-cs some-name gcp-cs s3://operator-testing/psmdb/2026-01-20T18:42:01Z logical 94.64KB ready 4m39s 5m7s backup-minio some-name minio s3://operator-testing/2026-01-20T18:41:26Z logical 90.14KB ready 5m15s 5m43s + cat /tmp/tmp.FayIT7aR3C + rm /tmp/tmp.ViXFKqMTgn /tmp/tmp.FayIT7aR3C + return 0 + kubectl_bin delete psmdb-backup --all ++ mktemp + local LAST_OUT=/tmp/tmp.k6Bs6wTVx8 ++ mktemp + local LAST_ERR=/tmp/tmp.CdNlyJMP71 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb-backup --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k6Bs6wTVx8 perconaservermongodbbackup.psmdb.percona.com "backup-gcp-cs" deleted from data-at-rest-encryption-28610 namespace perconaservermongodbbackup.psmdb.percona.com "backup-minio" deleted from data-at-rest-encryption-28610 
namespace + cat /tmp/tmp.CdNlyJMP71 + rm /tmp/tmp.k6Bs6wTVx8 /tmp/tmp.CdNlyJMP71 + return 0 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.GV2yLkN7UF ++ mktemp + local LAST_ERR=/tmp/tmp.3udYqHGPE5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GV2yLkN7UF customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.3udYqHGPE5 + rm /tmp/tmp.GV2yLkN7UF /tmp/tmp.3udYqHGPE5 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.BQVoK2aYk4 ++ mktemp + local LAST_ERR=/tmp/tmp.SdrqCkmI7a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BQVoK2aYk4 + cat /tmp/tmp.SdrqCkmI7a + rm /tmp/tmp.BQVoK2aYk4 /tmp/tmp.SdrqCkmI7a + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.qqxTcuRSsN ++ mktemp + local LAST_ERR=/tmp/tmp.EKeffTelJO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.BQVoK2aYk4
++ mktemp
+ local LAST_ERR=/tmp/tmp.SdrqCkmI7a
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.BQVoK2aYk4
+ cat /tmp/tmp.SdrqCkmI7a
+ rm /tmp/tmp.BQVoK2aYk4 /tmp/tmp.SdrqCkmI7a
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.qqxTcuRSsN
++ mktemp
+ local LAST_ERR=/tmp/tmp.EKeffTelJO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qqxTcuRSsN
+ cat /tmp/tmp.EKeffTelJO
+ rm /tmp/tmp.qqxTcuRSsN /tmp/tmp.EKeffTelJO
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.10NVAFDZWS
++ mktemp
+ local LAST_ERR=/tmp/tmp.h7z0hLHYim
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.10NVAFDZWS
+ cat /tmp/tmp.h7z0hLHYim
+ rm /tmp/tmp.10NVAFDZWS /tmp/tmp.h7z0hLHYim
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.02d3Zmu7PA
++ mktemp
+ local LAST_ERR=/tmp/tmp.qQV7EFGZHh
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.02d3Zmu7PA
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.qQV7EFGZHh
+ rm /tmp/tmp.02d3Zmu7PA /tmp/tmp.qQV7EFGZHh
+ return 0
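[annotation] The loop above enumerates every CRD name in deploy/crd.yaml with yq, drops the `---` document separators, force-clears finalizers on any leftover custom resources, and then blocks on `kubectl wait --for=delete` until the CRD object itself is gone. Two small wrinkles are visible in the trace: grep warns about the needlessly escaped pattern '\-\-\-' (passing the literal pattern after `--` avoids the warning), and xargs runs its command once even with empty input, which is where the bogus `kubectl patch ... -n sh` call comes from (GNU xargs' `-r`/`--no-run-if-empty` skips it). A hedged sketch of the same cleanup, assuming yq v4 and GNU xargs; the two fixes are editorial suggestions, not the harness's actual code:

    src_dir=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2207

    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
        # Clear finalizers on stragglers so CRD deletion can complete;
        # -r skips the patch entirely when `get` returns nothing.
        kubectl get "$crd_name" --all-namespaces -o wide 2>/dev/null \
            | grep -v NAMESPACE \
            | xargs -r -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
            || true
        # Block until the CRD object itself disappears.
        kubectl wait --for=delete "crd/$crd_name" || true
    done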
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.oTfROqLcSO + cat /tmp/tmp.gkjwtU0lz7 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
[... same NotFound errors as above ...]
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.oTfROqLcSO
+ cat /tmp/tmp.gkjwtU0lz7
[... same NotFound errors as above ...]
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.oTfROqLcSO + cat /tmp/tmp.gkjwtU0lz7 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.oTfROqLcSO /tmp/tmp.gkjwtU0lz7 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace data-at-rest-encryption-28610 + rm -rf /tmp/tmp.bpXt9EUguu + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.0vyiMhcW0a + desc 'test passed' + set +o xtrace + local LAST_OUT=/tmp/tmp.Ab9D6I0if0 ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.jhYqOL953W + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.L4MRnnueJW + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace data-at-rest-encryption-28610