Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/logs/data-at-rest-encryption.log
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
+ create_infra data-at-rest-encryption-19823
+ local ns=data-at-rest-encryption-19823
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.WKXsYCCTkH
++ mktemp
+ local LAST_ERR=/tmp/tmp.tKmTHVo4OH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WKXsYCCTkH
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.tKmTHVo4OH
+ rm /tmp/tmp.WKXsYCCTkH /tmp/tmp.tKmTHVo4OH
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.xvdiZNiITq
++ mktemp
+ local LAST_ERR=/tmp/tmp.mrjiAmGxAp
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.xvdiZNiITq
+ cat /tmp/tmp.mrjiAmGxAp
+ rm /tmp/tmp.xvdiZNiITq /tmp/tmp.mrjiAmGxAp
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.A0HIHiAuMA
++ mktemp
+ local LAST_ERR=/tmp/tmp.BQXFpYBxub
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.A0HIHiAuMA
+ cat /tmp/tmp.BQXFpYBxub
+ rm /tmp/tmp.A0HIHiAuMA /tmp/tmp.BQXFpYBxub
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.SEPk2ZXSwB
++ mktemp
+ local LAST_ERR=/tmp/tmp.P1ZiDvJSen
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.SEPk2ZXSwB
+ cat /tmp/tmp.P1ZiDvJSen
+ rm /tmp/tmp.SEPk2ZXSwB /tmp/tmp.P1ZiDvJSen
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.zWdHEhlEPd
++ mktemp
+ local LAST_ERR=/tmp/tmp.kYdbsucMC4
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zWdHEhlEPd
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.kYdbsucMC4
+ rm /tmp/tmp.zWdHEhlEPd /tmp/tmp.kYdbsucMC4
+ return 0
+ check_crd_for_deletion PR-1917-d6e9d6b1
+ local git_tag=PR-1917-d6e9d6b1
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1917-d6e9d6b1/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/bin/sed s/---//g
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.58kX8awypk
+++ mktemp
++ local LAST_ERR=/tmp/tmp.aJBeAWwnpN
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.58kX8awypk
++ cat /tmp/tmp.aJBeAWwnpN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.58kX8awypk
++ cat /tmp/tmp.aJBeAWwnpN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.58kX8awypk
++ cat /tmp/tmp.aJBeAWwnpN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.58kX8awypk
++ cat /tmp/tmp.aJBeAWwnpN
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.58kX8awypk /tmp/tmp.aJBeAWwnpN
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ awk '{print $1}'
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
+ awk '{print$1}'
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.76KvfGuWuF
++ mktemp
+ local LAST_ERR=/tmp/tmp.CUbmSZind7
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ local LAST_OUT=/tmp/tmp.WMlXFmD7HC
++ mktemp
+ local LAST_ERR=/tmp/tmp.hEuOY2USM7
+ local exit_status=0
+ local timeout=4
+ xargs kubectl delete ns
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.76KvfGuWuF
+ cat /tmp/tmp.CUbmSZind7
+ rm /tmp/tmp.76KvfGuWuF /tmp/tmp.CUbmSZind7
+ return 0
namespace "data-at-rest-encryption-10763" deleted
namespace "gke-managed-cim" deleted
namespace "gke-managed-system" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WMlXFmD7HC
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.hEuOY2USM7
+ rm /tmp/tmp.WMlXFmD7HC /tmp/tmp.hEuOY2USM7
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.OGeuvr9U6r
++ mktemp
+ local LAST_ERR=/tmp/tmp.wIeAyHdg45
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.OGeuvr9U6r
+ cat /tmp/tmp.wIeAyHdg45
+ rm /tmp/tmp.OGeuvr9U6r /tmp/tmp.wIeAyHdg45
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.pKegmi6QBg
++ mktemp
+ local LAST_ERR=/tmp/tmp.vpuD4cTSab
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pKegmi6QBg
namespace/psmdb-operator created
+ cat /tmp/tmp.vpuD4cTSab
+ rm /tmp/tmp.pKegmi6QBg /tmp/tmp.vpuD4cTSab
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.NLdWitZdcq
+++ mktemp
++ local LAST_ERR=/tmp/tmp.SCuSSpSrSU
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.NLdWitZdcq
++ cat /tmp/tmp.SCuSSpSrSU
++ rm /tmp/tmp.NLdWitZdcq /tmp/tmp.SCuSSpSrSU
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-d6e9d6b1-1-cluster8 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.E7wiAVHH7s
++ mktemp
+ local LAST_ERR=/tmp/tmp.Xgqag49ese
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-d6e9d6b1-1-cluster8 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.E7wiAVHH7s
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-d6e9d6b1-1-cluster8" modified.
+ cat /tmp/tmp.Xgqag49ese
+ rm /tmp/tmp.E7wiAVHH7s /tmp/tmp.Xgqag49ese
+ return 0
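The namespace cleanup traced above runs two kubectl_bin invocations in parallel (one direct delete, one piped sweep), which is why their trace lines interleave. Untangled, the sweep is a single pipeline, with the exclusion list exactly as the trace shows:

    kubectl get ns \
        | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
        | awk '{print $1}' \
        | xargs kubectl delete ns

That is why GKE add-on namespaces such as gke-managed-cim, gmp-public and gmp-system show up as deleted alongside data-at-rest-encryption-10763, the leftover namespace from a previous run of this test.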
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.lRiUYusQpN
++ mktemp
+ local LAST_ERR=/tmp/tmp.SHpMvS92bO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lRiUYusQpN
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.SHpMvS92bO
+ rm /tmp/tmp.lRiUYusQpN /tmp/tmp.SHpMvS92bO
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.ONxbTK547s
++ mktemp
+ local LAST_ERR=/tmp/tmp.KtwwNm0cxa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ONxbTK547s
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.KtwwNm0cxa
+ rm /tmp/tmp.ONxbTK547s /tmp/tmp.KtwwNm0cxa
+ return 0
+ yq eval '
    (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1917-d6e9d6b1") |
    ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
    ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/deploy/cw-operator.yaml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.jFDA0vc7S4
++ mktemp
+ local LAST_ERR=/tmp/tmp.594dDt9ZrQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jFDA0vc7S4
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.594dDt9ZrQ
+ rm /tmp/tmp.jFDA0vc7S4 /tmp/tmp.594dDt9ZrQ
+ return 0
+ sleep 2
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.8B9AXAqqXX
+++ mktemp
++ local LAST_ERR=/tmp/tmp.TbuG9fd1QP
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.8B9AXAqqXX
++ cat /tmp/tmp.TbuG9fd1QP
++ rm /tmp/tmp.8B9AXAqqXX /tmp/tmp.TbuG9fd1QP
++ return 0
+ wait_pod percona-server-mongodb-operator-758b744857-h72p4
+ local pod=percona-server-mongodb-operator-758b744857-h72p4
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-758b744857-h72p4 to be ready.OK
+ create_namespace data-at-rest-encryption-19823
+ local namespace=data-at-rest-encryption-19823
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ sed s/NAMESPACE//
++ tail -n1
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ awk '{print $1}'
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrolebinding
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces data-at-rest-encryption-19823'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces data-at-rest-encryption-19823
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace data-at-rest-encryption-19823 --ignore-not-found
+ awk '{print$1}'
++ mktemp
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
++ mktemp
+ local LAST_OUT=/tmp/tmp.dIIryYHizP
++ mktemp
+ local LAST_OUT=/tmp/tmp.wLMKtsHCUW
++ mktemp
+ local LAST_ERR=/tmp/tmp.7hwv4KIv0U
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.o6qKjtAcZw
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace data-at-rest-encryption-19823 --ignore-not-found
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ xargs kubectl delete ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wLMKtsHCUW
+ cat /tmp/tmp.o6qKjtAcZw
+ rm /tmp/tmp.wLMKtsHCUW /tmp/tmp.o6qKjtAcZw
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.dIIryYHizP
+ cat /tmp/tmp.7hwv4KIv0U
+ rm /tmp/tmp.dIIryYHizP /tmp/tmp.7hwv4KIv0U
+ return 0
+ kubectl_bin wait --for=delete namespace data-at-rest-encryption-19823
++ mktemp
+ local LAST_OUT=/tmp/tmp.zI0PSy2NsS
++ mktemp
+ local LAST_ERR=/tmp/tmp.C4rTFeboda
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace data-at-rest-encryption-19823
namespace "gke-managed-cim" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zI0PSy2NsS
+ cat /tmp/tmp.C4rTFeboda
+ rm /tmp/tmp.zI0PSy2NsS /tmp/tmp.C4rTFeboda
+ return 0
+ desc 'create namespace data-at-rest-encryption-19823'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace data-at-rest-encryption-19823
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace data-at-rest-encryption-19823
++ mktemp
+ local LAST_OUT=/tmp/tmp.eN6kDTF54n
++ mktemp
+ local LAST_ERR=/tmp/tmp.87Of8mEkx9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace data-at-rest-encryption-19823
namespace "gke-managed-system" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.eN6kDTF54n
namespace/data-at-rest-encryption-19823 created
+ cat /tmp/tmp.87Of8mEkx9
+ rm /tmp/tmp.eN6kDTF54n /tmp/tmp.87Of8mEkx9
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.WFPZAqF3ON
+++ mktemp
++ local LAST_ERR=/tmp/tmp.oPPK97Wf4J
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.WFPZAqF3ON
++ cat /tmp/tmp.oPPK97Wf4J
++ rm /tmp/tmp.WFPZAqF3ON /tmp/tmp.oPPK97Wf4J
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-d6e9d6b1-1-cluster8 --namespace=data-at-rest-encryption-19823
++ mktemp
+ local LAST_OUT=/tmp/tmp.9APk2tULsa
++ mktemp
+ local LAST_ERR=/tmp/tmp.zxR1fpNCtX
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-d6e9d6b1-1-cluster8 --namespace=data-at-rest-encryption-19823
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.9APk2tULsa
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1917-d6e9d6b1-1-cluster8" modified.
+ cat /tmp/tmp.zxR1fpNCtX
+ rm /tmp/tmp.9APk2tULsa /tmp/tmp.zxR1fpNCtX
+ return 0
+ deploy_vault
+ name=vault-service
+ desc 'install Vault vault-service'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Vault vault-service
-----------------------------------------------------------------------------------
+ helm uninstall vault-service
Error: uninstall: Release not loaded: vault-service: release: not found
+ :
+ helm repo remove hashicorp
"hashicorp" has been removed from your repositories
+ helm repo add hashicorp https://helm.releases.hashicorp.com
"hashicorp" has been added to your repositories
+ destroy_vault
++ helm list --all-namespaces --filter vault-service
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local vault_ns=
+ desc 'destroy vault'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy vault
-----------------------------------------------------------------------------------
++ kubectl api-resources
++ grep vault
++ awk '{print $1}'
+ '[' -n '' ']'
++ kubectl get crd
++ grep vault
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ grep vault
++ kubectl get clusterrolebinding
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding vault-service-agent-injector-binding vault-service-server-binding
clusterrolebinding.rbac.authorization.k8s.io "vault-service-agent-injector-binding" deleted
clusterrolebinding.rbac.authorization.k8s.io "vault-service-server-binding" deleted
++ kubectl get clusterrole
++ grep vault
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole vault-service-agent-injector-clusterrole
clusterrole.rbac.authorization.k8s.io "vault-service-agent-injector-clusterrole" deleted
++ kubectl get mutatingwebhookconfiguration
++ grep vault
++ awk '{print $1}'
+ timeout 30 kubectl delete mutatingwebhookconfiguration vault-service-agent-injector-cfg
mutatingwebhookconfiguration.admissionregistration.k8s.io "vault-service-agent-injector-cfg" deleted
+ [[ -n '' ]]
+ retry 10 60 helm install vault-service hashicorp/vault --disable-openapi-validation --set dataStorage.enabled=false
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install vault-service hashicorp/vault --disable-openapi-validation --set dataStorage.enabled=false
NAME: vault-service
LAST DEPLOYED: Mon May 12 12:29:09 2025
NAMESPACE: data-at-rest-encryption-19823
STATUS: deployed
REVISION: 1
NOTES:
Thank you for installing HashiCorp Vault!

Now that you have deployed Vault, you should look over the docs on using
Vault with Kubernetes available here:

https://developer.hashicorp.com/vault/docs

Your release is named vault-service.

To learn more about the release, try:

  $ helm status vault-service
  $ helm get manifest vault-service
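The retry helper used for both helm installs in this log takes a maximum attempt count and a delay: retry 10 60 <command> retries up to 10 times, sleeping 60 seconds between attempts. The trace only shows its opening lines (local max=10, local delay=60, shift 2, local n=1); the loop below is a plausible completion under those assumptions, not a verbatim copy of the helper:

    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "command '$*' failed after $n attempts" >&2
                return 1
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }

    retry 10 60 helm install vault-service hashicorp/vault \
        --disable-openapi-validation --set dataStorage.enabled=false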
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ grep Running
+ sleep 1
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ grep Running
+ sleep 1
+ grep Running
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ sleep 1
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ grep Running
+ sleep 1
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ grep Running
+ sleep 1
+ grep Running
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ sleep 1
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ grep Running
+ sleep 1
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
+ grep Running
+ sleep 1
+ grep Running
+ kubectl_bin get pod/vault-service-0 -o 'jsonpath={.status.phase}'
Running
+ kubectl_bin exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json
++ mktemp
+ local LAST_OUT=/tmp/tmp.WbuKi44TiC
++ mktemp
+ local LAST_ERR=/tmp/tmp.YQJmZAoQ9i
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec pod/vault-service-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WbuKi44TiC
+ cat /tmp/tmp.YQJmZAoQ9i
+ rm /tmp/tmp.WbuKi44TiC /tmp/tmp.YQJmZAoQ9i
+ return 0
++ jq -r '.unseal_keys_b64[]'
+ local unsealKey=+LH6ktMynUBD39S2UY0Yr4tyZABNQ2Spqe3yB/XszR8=
++ jq -r .root_token
+ local token=hvs.FItxCOlQP6XDPYaNhVA4JYUD
+ kubectl_bin exec pod/vault-service-0 -- vault operator unseal +LH6ktMynUBD39S2UY0Yr4tyZABNQ2Spqe3yB/XszR8=
++ mktemp
+ local LAST_OUT=/tmp/tmp.WiQhCeQHrk
++ mktemp
+ local LAST_ERR=/tmp/tmp.Z3bTZ0y78N
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec pod/vault-service-0 -- vault operator unseal +LH6ktMynUBD39S2UY0Yr4tyZABNQ2Spqe3yB/XszR8=
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WiQhCeQHrk
Key             Value
---             -----
Seal Type       shamir
Initialized     true
Sealed          false
Total Shares    1
Threshold       1
Version         1.19.0
Build Date      2025-03-04T12:36:40Z
Storage Type    file
Cluster Name    vault-cluster-5bb66a65
Cluster ID      a344b70d-8376-9eaa-7fb3-3b1311f2c445
HA Enabled      false
+ cat /tmp/tmp.Z3bTZ0y78N
+ rm /tmp/tmp.WiQhCeQHrk /tmp/tmp.Z3bTZ0y78N
+ return 0
+ kubectl_bin exec -it pod/vault-service-0 -- sh
++ mktemp
+ local LAST_OUT=/tmp/tmp.2WAfwmQ01M
++ mktemp
+ local LAST_ERR=/tmp/tmp.JMBFACjqSy
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec -it pod/vault-service-0 -- sh
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2WAfwmQ01M
Success! You are now authenticated. The token information displayed below
is already stored in the token helper. You do NOT need to run "vault login"
again. Future Vault requests will automatically use this token.

Key                  Value
---                  -----
token                hvs.FItxCOlQP6XDPYaNhVA4JYUD
token_accessor       c5myE3gWhajJlhtw2nHhu7Wv
token_duration       ∞
token_renewable      false
token_policies       ["root"]
identity_policies    []
policies             ["root"]
Success! Enabled the kv-v2 secrets engine at: secret/
+ cat /tmp/tmp.JMBFACjqSy
Unable to use a TTY - input is not a terminal or the right kind of file
+ rm /tmp/tmp.2WAfwmQ01M /tmp/tmp.JMBFACjqSy
+ return 0
+ kubectl_bin create secret generic vault-secret --from-literal=token=hvs.FItxCOlQP6XDPYaNhVA4JYUD
++ mktemp
+ local LAST_OUT=/tmp/tmp.jFuvE0Lucc
++ mktemp
+ local LAST_ERR=/tmp/tmp.8gKmakXwk8
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create secret generic vault-secret --from-literal=token=hvs.FItxCOlQP6XDPYaNhVA4JYUD
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jFuvE0Lucc
secret/vault-secret created
+ cat /tmp/tmp.8gKmakXwk8
+ rm /tmp/tmp.jFuvE0Lucc /tmp/tmp.8gKmakXwk8
+ return 0
+ deploy_minio
+ desc 'install Minio'
+ set +o xtrace
-----------------------------------------------------------------------------------
install Minio
-----------------------------------------------------------------------------------
+ helm uninstall minio-service
Error: uninstall: Release not loaded: minio-service: release: not found
+ :
+ helm repo remove minio
"minio" has been removed from your repositories
+ helm repo add minio https://charts.min.io/
"minio" has been added to your repositories
+ retry 10 60 helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install minio-service --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
NAME: minio-service
LAST DEPLOYED: Mon May 12 12:29:51 2025
NAMESPACE: data-at-rest-encryption-19823
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
minio-service.data-at-rest-encryption-19823.svc.cluster.local

To access MinIO from localhost, run the below commands:

  1. export POD_NAME=$(kubectl get pods --namespace data-at-rest-encryption-19823 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
  2. kubectl port-forward $POD_NAME 9000 --namespace data-at-rest-encryption-19823

Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/

You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:

  1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
  2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace data-at-rest-encryption-19823 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace data-at-rest-encryption-19823 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
  3. mc ls minio-service-local
++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.HV9XJUG2g1
+++ mktemp
++ local LAST_ERR=/tmp/tmp.zDG7ghQw3q
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.HV9XJUG2g1
++ cat /tmp/tmp.zDG7ghQw3q
++ rm /tmp/tmp.HV9XJUG2g1 /tmp/tmp.zDG7ghQw3q
++ return 0
+ MINIO_POD=minio-service-8967c7f7f-vppmb
+ wait_pod minio-service-8967c7f7f-vppmb
+ local pod=minio-service-8967c7f7f-vppmb
+ set +o xtrace
waiting for pod/minio-service-8967c7f7f-vppmb to be ready.OK
+ '[' -n psmdb-operator ']'
+ kubectl_bin create svc -n psmdb-operator externalname minio-service --external-name=minio-service.data-at-rest-encryption-19823.svc.cluster.local --tcp=9000
++ mktemp
+ local LAST_OUT=/tmp/tmp.J36El4ffvf
++ mktemp
+ local LAST_ERR=/tmp/tmp.k9WmSB8INL
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create svc -n psmdb-operator externalname minio-service --external-name=minio-service.data-at-rest-encryption-19823.svc.cluster.local --tcp=9000
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.J36El4ffvf
service/minio-service created
+ cat /tmp/tmp.k9WmSB8INL
+ rm /tmp/tmp.J36El4ffvf /tmp/tmp.k9WmSB8INL
+ return 0
+ create_minio_bucket operator-testing
+ local bucket=operator-testing
+ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
++ mktemp
+ local LAST_OUT=/tmp/tmp.wHgwCUleUX
++ mktemp
+ local LAST_ERR=/tmp/tmp.v4bI8XSp9g
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wHgwCUleUX
make_bucket: operator-testing
pod "aws-cli" deleted
+ cat /tmp/tmp.v4bI8XSp9g
+ rm /tmp/tmp.wHgwCUleUX /tmp/tmp.v4bI8XSp9g
+ return 0
+ apply_s3_storage_secrets
+ desc 'create secrets for cloud storages'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets for cloud storages
-----------------------------------------------------------------------------------
+ '[' -z '' ']'
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/cloud-secret.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.VlIhvKUrUA
++ mktemp
+ local LAST_ERR=/tmp/tmp.XxN4izKu9X
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/cloud-secret.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.VlIhvKUrUA
secret/minio-secret created
secret/aws-s3-secret created
secret/gcp-cs-secret created
secret/azure-secret created
+ cat /tmp/tmp.XxN4izKu9X
+ rm /tmp/tmp.VlIhvKUrUA /tmp/tmp.XxN4izKu9X
+ return 0
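Stripped of the wrapper noise, the Vault bootstrap a few steps above is a standard single-key init/unseal flow, with the root token handed to the cluster through a Kubernetes secret. A condensed sketch (jq paths and the vault-secret name are exactly as in the trace; capturing the init output into one variable is a simplification, and the commands piped into the interactive shell are inferred from the "Success!" output rather than shown in the trace):

    init_json=$(kubectl exec pod/vault-service-0 -- \
        vault operator init -key-shares=1 -key-threshold=1 -format=json)
    unsealKey=$(echo "$init_json" | jq -r '.unseal_keys_b64[]')
    token=$(echo "$init_json" | jq -r '.root_token')
    kubectl exec pod/vault-service-0 -- vault operator unseal "$unsealKey"
    # the 'kubectl exec -it ... -- sh' step then logs in and enables the kv-v2
    # engine at secret/ (inferred); the "Unable to use a TTY" warning is harmless
    # because the commands are piped in rather than typed
    kubectl create secret generic vault-secret --from-literal=token="$token"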
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/client.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.RJd61Wb0jy
++ mktemp
+ local LAST_ERR=/tmp/tmp.YWiCbfqHg9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/conf/client.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RJd61Wb0jy
secret/some-users created
deployment.apps/psmdb-client created
+ cat /tmp/tmp.YWiCbfqHg9
+ rm /tmp/tmp.RJd61Wb0jy /tmp/tmp.YWiCbfqHg9
+ return 0
+ cluster=some-name
+ desc 'create PSMDB cluster some-name'
+ set +o xtrace
-----------------------------------------------------------------------------------
create PSMDB cluster some-name
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/conf/some-name.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/conf/some-name.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/conf/some-name.yml
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"'
++ mktemp
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ local LAST_OUT=/tmp/tmp.Z24OgAh4n0
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1917-d6e9d6b1"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
++ mktemp
+ local LAST_ERR=/tmp/tmp.7KgU4PYSXZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Z24OgAh4n0
perconaservermongodb.psmdb.percona.com/some-name created
+ cat /tmp/tmp.7KgU4PYSXZ
+ rm /tmp/tmp.Z24OgAh4n0 /tmp/tmp.7KgU4PYSXZ
+ return 0
+ desc 'check if all Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all Pods started
-----------------------------------------------------------------------------------
+ wait_for_running some-name-rs0 3 false
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=false
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.....OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready.....OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ST1SfOKZFg
+++ mktemp
++ local LAST_ERR=/tmp/tmp.kCMKh4gIci
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ST1SfOKZFg
++ cat /tmp/tmp.kCMKh4gIci
++ rm /tmp/tmp.ST1SfOKZFg /tmp/tmp.kCMKh4gIci
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready......OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.gHsdoOObjj
+++ mktemp
++ local LAST_ERR=/tmp/tmp.NT3G8QrrMQ
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.gHsdoOObjj
++ cat /tmp/tmp.NT3G8QrrMQ
++ rm /tmp/tmp.gHsdoOObjj /tmp/tmp.NT3G8QrrMQ
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ false == \t\r\u\e ]]
+ wait_for_running some-name-cfg 3 false
+ local name=some-name-cfg
+ let last_pod=2
+ local check_cluster_readyness=false
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=cfg
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-cfg-0
+ local pod=some-name-cfg-0
+ set +o xtrace
waiting for pod/some-name-cfg-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-cfg-1
+ local pod=some-name-cfg-1
+ set +o xtrace
waiting for pod/some-name-cfg-1 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.SMfGlIx5fd
+++ mktemp
++ local LAST_ERR=/tmp/tmp.lVX3BWaoqL
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.SMfGlIx5fd
++ cat /tmp/tmp.lVX3BWaoqL
++ rm /tmp/tmp.SMfGlIx5fd /tmp/tmp.lVX3BWaoqL
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-cfg-2
+ local pod=some-name-cfg-2
+ set +o xtrace
waiting for pod/some-name-cfg-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.WWCWDxmedZ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.gPxs98IkCm
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.WWCWDxmedZ
++ cat /tmp/tmp.gPxs98IkCm
++ rm /tmp/tmp.WWCWDxmedZ /tmp/tmp.gPxs98IkCm
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ false == \t\r\u\e ]]
+ wait_for_running some-name-mongos 3
+ local name=some-name-mongos
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=mongos
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-mongos-0
+ local pod=some-name-mongos-0
+ set +o xtrace
waiting for pod/some-name-mongos-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-mongos-1
+ local pod=some-name-mongos-1
+ set +o xtrace
waiting for pod/some-name-mongos-1 to be ready.....OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.wdUp6z4qVX
+++ mktemp
++ local LAST_ERR=/tmp/tmp.omqwqjJFtd
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.wdUp6z4qVX
++ cat /tmp/tmp.omqwqjJFtd
++ rm /tmp/tmp.wdUp6z4qVX /tmp/tmp.omqwqjJFtd
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-mongos-2
+ local pod=some-name-mongos-2
+ set +o xtrace
waiting for pod/some-name-mongos-2 to be ready....OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.YL889xDSPK
+++ mktemp
++ local LAST_ERR=/tmp/tmp.t7ui2fxmaH
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.YL889xDSPK
++ cat /tmp/tmp.t7ui2fxmaH
++ rm /tmp/tmp.YL889xDSPK /tmp/tmp.t7ui2fxmaH
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ desc 'check if service and statefulset created with expected config'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if service and statefulset created with expected config
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/some-name-rs0
+ local resource=statefulset/some-name-rs0
+ local postfix=
+ local skip_generation_check=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0.yml
+ local new_result=/tmp/tmp.cunLCQHFXw/statefulset_some-name-rs0.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0-oc.yml ']'
+ kubectl_bin get -o yaml statefulset/some-name-rs0
+ yq eval '
    del(.metadata.ownerReferences[].apiVersion) |
    del(.metadata.managedFields) |
    del(.. | select(has("creationTimestamp")).creationTimestamp) |
    del(.. | select(has("namespace")).namespace) |
    del(.. | select(has("uid")).uid) |
    del(.metadata.resourceVersion) |
    del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
    del(.metadata.selfLink) |
    del(.metadata.annotations."cloud.google.com/neg") |
    del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
    del(.. | select(has("image")).image) |
    del(.. | select(has("clusterIP")).clusterIP) |
    del(.. | select(has("clusterIPs")).clusterIPs) |
    del(.. | select(has("dataSource")).dataSource) |
    del(.. | select(has("procMount")).procMount) |
    del(.. | select(has("storageClassName")).storageClassName) |
    del(.. | select(has("finalizers")).finalizers) |
    del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
    del(.. | select(has("volumeName")).volumeName) |
    del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
    del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
    del(.spec.volumeMode) |
    del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
    del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
    del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
    del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
    del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
    del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
    del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
    del(.. | select(has("nodePort")).nodePort) |
    del(.status) |
    (.. | select(tag == "!!str")) |= sub("data-at-rest-encryption-19823", "NAME_SPACE") |
    del(.spec.volumeClaimTemplates[].apiVersion) |
    del(.spec.volumeClaimTemplates[].kind) |
    del(.spec.ipFamilies) |
    del(.spec.ipFamilyPolicy) |
    (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
    (.. | select(. == "batch/v1beta1")) = "batch/v1"
    ' -
++ mktemp
+ local LAST_OUT=/tmp/tmp.3s2VFlDlJR
++ mktemp
+ local LAST_ERR=/tmp/tmp.H0FkKhAeMH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get -o yaml statefulset/some-name-rs0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.3s2VFlDlJR
+ cat /tmp/tmp.H0FkKhAeMH
+ rm /tmp/tmp.3s2VFlDlJR /tmp/tmp.H0FkKhAeMH
+ return 0
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.cunLCQHFXw/statefulset_some-name-rs0.yml
+ version_gt 1.22
++ echo '1.30 >= 1.22'
++ bc -l
+ '[' 1 -eq 1 ']'
+ return 0
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.cunLCQHFXw/statefulset_some-name-rs0.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.cunLCQHFXw/statefulset_some-name-rs0.yml
+ [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0.yml == */cronjob* ]]
+ '[' -n '' ']'
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0.yml /tmp/tmp.cunLCQHFXw/statefulset_some-name-rs0.yml
--- /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1917/e2e-tests/data-at-rest-encryption/compare/statefulset_some-name-rs0.yml	2025-05-12 12:04:34.288097170 +0000
+++ /tmp/tmp.cunLCQHFXw/statefulset_some-name-rs0.yml	2025-05-12 12:33:07.988618693 +0000
@@ -102,12 +102,6 @@
         - readiness
         - --component
         - mongod
-        - --ssl
-        - --sslInsecure
-        - --sslCAFile
-        - /etc/mongodb-ssl/ca.crt
-        - --sslPEMKeyFile
-        - /tmp/tls.pem
       failureThreshold: 8
       initialDelaySeconds: 10
       periodSeconds: 3