Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/logs/tls-issue-cert-manager.log
grep: warning: stray \ before -
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
+ main + create_infra tls-issue-cert-manager-18440 + local ns=tls-issue-cert-manager-18440 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.vYvgwytuDd ++ mktemp + local LAST_ERR=/tmp/tmp.WzFKhsoHYs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vYvgwytuDd customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.WzFKhsoHYs + rm /tmp/tmp.vYvgwytuDd /tmp/tmp.WzFKhsoHYs + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.NdMrGd04U8 ++ mktemp + local LAST_ERR=/tmp/tmp.SnVFR0yVkq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NdMrGd04U8 + cat /tmp/tmp.SnVFR0yVkq + rm /tmp/tmp.NdMrGd04U8 /tmp/tmp.SnVFR0yVkq + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge
-p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.EM5ZNsEJNV ++ mktemp + local LAST_ERR=/tmp/tmp.IrfeiuhJm2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EM5ZNsEJNV + cat /tmp/tmp.IrfeiuhJm2 + rm /tmp/tmp.EM5ZNsEJNV /tmp/tmp.IrfeiuhJm2 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Po5ioguYp9 ++ mktemp + local LAST_ERR=/tmp/tmp.LUB5Zl5UCm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Po5ioguYp9 + cat /tmp/tmp.LUB5Zl5UCm + rm /tmp/tmp.Po5ioguYp9 /tmp/tmp.LUB5Zl5UCm + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.pFbIsSSkDf ++ mktemp + local LAST_ERR=/tmp/tmp.g8bW1EZqMR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pFbIsSSkDf clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted serviceaccount "percona-server-mongodb-operator" deleted from psmdb-operator namespace clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.g8bW1EZqMR + rm /tmp/tmp.pFbIsSSkDf /tmp/tmp.g8bW1EZqMR + return 0 + check_crd_for_deletion PR-2148-fce4e0a0 + local git_tag=PR-2148-fce4e0a0 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2148-fce4e0a0/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xTANK6kWwz +++ mktemp ++ local LAST_ERR=/tmp/tmp.6WJoQytG3O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xTANK6kWwz ++ cat /tmp/tmp.6WJoQytG3O Error 
from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xTANK6kWwz ++ cat /tmp/tmp.6WJoQytG3O Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xTANK6kWwz ++ cat /tmp/tmp.6WJoQytG3O Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.xTANK6kWwz ++ cat /tmp/tmp.6WJoQytG3O Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.xTANK6kWwz /tmp/tmp.6WJoQytG3O ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.N5zxgS8ONc egrep: 
warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.p1v2lZHDx7 ++ mktemp + local LAST_ERR=/tmp/tmp.IERmTgGyYO + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.rpgn2Od0AL + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) ++ seq 0 2 + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N5zxgS8ONc + cat /tmp/tmp.IERmTgGyYO + rm /tmp/tmp.N5zxgS8ONc /tmp/tmp.IERmTgGyYO + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p1v2lZHDx7 namespace "psmdb-operator" deleted + cat /tmp/tmp.rpgn2Od0AL + rm /tmp/tmp.p1v2lZHDx7 /tmp/tmp.rpgn2Od0AL + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.5yfezGJwSy ++ mktemp + local LAST_ERR=/tmp/tmp.dBGFcCUKt7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5yfezGJwSy + cat /tmp/tmp.dBGFcCUKt7 + rm /tmp/tmp.5yfezGJwSy /tmp/tmp.dBGFcCUKt7 + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.HxOsGK1NQW ++ mktemp + local LAST_ERR=/tmp/tmp.YrDQNe2U1I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HxOsGK1NQW namespace/psmdb-operator created + cat /tmp/tmp.YrDQNe2U1I + rm /tmp/tmp.HxOsGK1NQW /tmp/tmp.YrDQNe2U1I + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.6niq7vM0Np +++ mktemp ++ local LAST_ERR=/tmp/tmp.knclYi4YTW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6niq7vM0Np ++ cat /tmp/tmp.knclYi4YTW ++ rm /tmp/tmp.6niq7vM0Np /tmp/tmp.knclYi4YTW ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2148-fce4e0a0-5-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.caBeVh1sPm ++ mktemp + local LAST_ERR=/tmp/tmp.L4rBuq5vrH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2148-fce4e0a0-5-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.caBeVh1sPm Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2148-fce4e0a0-5-cluster4" modified. 
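
Every kubectl invocation in this trace runs through the kubectl_bin wrapper, which is why each step is bracketed by mktemp/cat/rm lines. Reconstructed from the expansions above, the wrapper amounts to the minimal sketch below; it is an approximation (argument handling and the exact failure test in the real helper may differ):

    kubectl_bin() {
        # capture stdout/stderr in temp files and retry up to three times,
        # sleeping $((timeout * i)) between attempts: 0s, 4s, 8s as seen above
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" = 0 ] && break
            sleep $((timeout * i))
        done
        # the trace prints both captures to stdout, then cleans up
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }
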
+ cat /tmp/tmp.L4rBuq5vrH + rm /tmp/tmp.caBeVh1sPm /tmp/tmp.L4rBuq5vrH + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2148-fce4e0a0' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2148-fce4e0a0 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TZVz4Tb6DO ++ mktemp + local LAST_ERR=/tmp/tmp.RqZcVNCXfb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TZVz4Tb6DO customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.RqZcVNCXfb + rm /tmp/tmp.TZVz4Tb6DO /tmp/tmp.RqZcVNCXfb + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.wJt6GUWxnt ++ mktemp + local LAST_ERR=/tmp/tmp.fHGQccLeYw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wJt6GUWxnt clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.fHGQccLeYw + rm /tmp/tmp.wJt6GUWxnt /tmp/tmp.fHGQccLeYw + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2148-fce4e0a0") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2OKEyOO1js ++ mktemp + local LAST_ERR=/tmp/tmp.5MztdfQ4AL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2OKEyOO1js deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.5MztdfQ4AL + rm /tmp/tmp.2OKEyOO1js /tmp/tmp.5MztdfQ4AL + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.vxMhSehDVp +++ mktemp ++ local LAST_ERR=/tmp/tmp.uH8IapuMgz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vxMhSehDVp ++ cat /tmp/tmp.uH8IapuMgz ++ rm /tmp/tmp.vxMhSehDVp /tmp/tmp.uH8IapuMgz ++ return 0 + wait_operator_pod percona-server-mongodb-operator-64576c55fb-jxjqq + local pod=percona-server-mongodb-operator-64576c55fb-jxjqq + set +o xtrace waiting for pod/percona-server-mongodb-operator-64576c55fb-jxjqq to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wxvxk1lEIr +++ mktemp ++ local LAST_ERR=/tmp/tmp.8SJur0k2hH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wxvxk1lEIr ++ cat /tmp/tmp.8SJur0k2hH ++ rm /tmp/tmp.Wxvxk1lEIr /tmp/tmp.8SJur0k2hH ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-64576c55fb-jxjqq ++ mktemp + local LAST_OUT=/tmp/tmp.zyu8oDeB9U ++ mktemp + local LAST_ERR=/tmp/tmp.laGXwK0pJu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-64576c55fb-jxjqq + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zyu8oDeB9U + cat /tmp/tmp.laGXwK0pJu + rm /tmp/tmp.zyu8oDeB9U /tmp/tmp.laGXwK0pJu + return 0 2025-12-17T12:38:05.382Z INFO setup Manager starting up {"gitCommit": "fce4e0a0cd8e3f0b4555d9ab1e9383976f297a6f", "gitBranch": "PR-2148-fce4e0a0", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace tls-issue-cert-manager-18440 + local namespace=tls-issue-cert-manager-18440 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep 
chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces tls-issue-cert-manager-18440' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces tls-issue-cert-manager-18440 ----------------------------------------------------------------------------------- ++ mktemp + kubectl_bin delete namespace tls-issue-cert-manager-18440 --ignore-not-found egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.jQkciDXkWh ++ mktemp + local LAST_OUT=/tmp/tmp.LXScL0BrgE + local LAST_ERR=/tmp/tmp.ZNlBHiu5sy + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.UX42oUM2Ss + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace tls-issue-cert-manager-18440 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jQkciDXkWh + cat /tmp/tmp.ZNlBHiu5sy + rm /tmp/tmp.jQkciDXkWh /tmp/tmp.ZNlBHiu5sy + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LXScL0BrgE + cat /tmp/tmp.UX42oUM2Ss + rm /tmp/tmp.LXScL0BrgE /tmp/tmp.UX42oUM2Ss + return 0 + kubectl_bin wait --for=delete namespace tls-issue-cert-manager-18440 ++ mktemp + local LAST_OUT=/tmp/tmp.PqaQgTdrJl ++ mktemp + local LAST_ERR=/tmp/tmp.nmXYjVF4aY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace tls-issue-cert-manager-18440 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PqaQgTdrJl + cat /tmp/tmp.nmXYjVF4aY + rm /tmp/tmp.PqaQgTdrJl /tmp/tmp.nmXYjVF4aY + return 0 + desc 'create namespace tls-issue-cert-manager-18440' + set +o xtrace ----------------------------------------------------------------------------------- create namespace tls-issue-cert-manager-18440 
----------------------------------------------------------------------------------- + kubectl_bin create namespace tls-issue-cert-manager-18440 ++ mktemp + local LAST_OUT=/tmp/tmp.lnI1QNpjvQ ++ mktemp + local LAST_ERR=/tmp/tmp.f86fI5RoPP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace tls-issue-cert-manager-18440 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lnI1QNpjvQ namespace/tls-issue-cert-manager-18440 created + cat /tmp/tmp.f86fI5RoPP + rm /tmp/tmp.lnI1QNpjvQ /tmp/tmp.f86fI5RoPP + return 0 + set_kube_ctx tls-issue-cert-manager-18440 + local namespace=tls-issue-cert-manager-18440 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.xniRv2mbo5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.muqW6j9OIc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xniRv2mbo5 ++ cat /tmp/tmp.muqW6j9OIc ++ rm /tmp/tmp.xniRv2mbo5 /tmp/tmp.muqW6j9OIc ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2148-fce4e0a0-5-cluster4 --namespace=tls-issue-cert-manager-18440 ++ mktemp + local LAST_OUT=/tmp/tmp.qk6tG4XeoM ++ mktemp + local LAST_ERR=/tmp/tmp.wRzyCWXWLD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2148-fce4e0a0-5-cluster4 --namespace=tls-issue-cert-manager-18440 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qk6tG4XeoM Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2148-fce4e0a0-5-cluster4" modified. + cat /tmp/tmp.wRzyCWXWLD + rm /tmp/tmp.qk6tG4XeoM /tmp/tmp.wRzyCWXWLD + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.kl44DwethH ++ mktemp + local LAST_ERR=/tmp/tmp.rD7aRltSb8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kl44DwethH namespace/cert-manager created + cat /tmp/tmp.rD7aRltSb8 + rm /tmp/tmp.kl44DwethH /tmp/tmp.rD7aRltSb8 + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.RWcF9stSBH ++ mktemp + local LAST_ERR=/tmp/tmp.byxDmQUaCN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RWcF9stSBH namespace/cert-manager labeled + cat /tmp/tmp.byxDmQUaCN + rm /tmp/tmp.RWcF9stSBH /tmp/tmp.byxDmQUaCN + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.T930M9jzxk ++ mktemp + local LAST_ERR=/tmp/tmp.dGwbsnqCKU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 
'!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T930M9jzxk namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io created serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view created clusterrole.rbac.authorization.k8s.io/cert-manager-view created clusterrole.rbac.authorization.k8s.io/cert-manager-edit created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests created clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews created role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created role.rbac.authorization.k8s.io/cert-manager:leaderelection created role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection created rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created 
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook created + cat /tmp/tmp.dGwbsnqCKU Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.T930M9jzxk /tmp/tmp.dGwbsnqCKU + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.1cbr97MmPv ++ mktemp + local LAST_ERR=/tmp/tmp.xLNnrj2g1V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1cbr97MmPv pod/cert-manager-cainjector-5dc9c8b4f7-899mb condition met pod/cert-manager-df4b69479-s46s4 condition met pod/cert-manager-webhook-769bbb594d-hllfc condition met + cat /tmp/tmp.xLNnrj2g1V + rm /tmp/tmp.1cbr97MmPv /tmp/tmp.xLNnrj2g1V + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.WXk37g8l1z ++ mktemp + local LAST_ERR=/tmp/tmp.gd92zHqSF6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WXk37g8l1z secret/some-users created + cat /tmp/tmp.gd92zHqSF6 + rm /tmp/tmp.WXk37g8l1z /tmp/tmp.gd92zHqSF6 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.t7aXGhwiiR ++ mktemp + local LAST_ERR=/tmp/tmp.WCLvxAz70h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t7aXGhwiiR deployment.apps/psmdb-client created + cat /tmp/tmp.WCLvxAz70h + rm /tmp/tmp.t7aXGhwiiR /tmp/tmp.WCLvxAz70h + return 0 + desc 'create custom cert-manager issuers and certificates' + set +o xtrace ----------------------------------------------------------------------------------- create custom cert-manager issuers and certificates ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ItMqxnEJCm ++ mktemp + local LAST_ERR=/tmp/tmp.Yu7JH9eOJo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + 
cat /tmp/tmp.ItMqxnEJCm issuer.cert-manager.io/some-name-psmdb-ca-issuer created + cat /tmp/tmp.Yu7JH9eOJo + rm /tmp/tmp.ItMqxnEJCm /tmp/tmp.Yu7JH9eOJo + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.YF6KeJ6VTk ++ mktemp + local LAST_ERR=/tmp/tmp.pKpJGRnP2d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YF6KeJ6VTk issuer.cert-manager.io/some-name-psmdb-issuer created + cat /tmp/tmp.pKpJGRnP2d + rm /tmp/tmp.YF6KeJ6VTk /tmp/tmp.pKpJGRnP2d + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml ++ mktemp + local LAST_OUT=/tmp/tmp.p5YGfneJQV ++ mktemp + local LAST_ERR=/tmp/tmp.A29l7oneON + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p5YGfneJQV certificate.cert-manager.io/some-name-ca-cert created + cat /tmp/tmp.A29l7oneON Warning: spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. + rm /tmp/tmp.p5YGfneJQV /tmp/tmp.A29l7oneON + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ySxqe4gLch ++ mktemp + local LAST_ERR=/tmp/tmp.vgIxg6VGyM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ySxqe4gLch certificate.cert-manager.io/some-name-ssl-internal created + cat /tmp/tmp.vgIxg6VGyM Warning: spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. + rm /tmp/tmp.ySxqe4gLch /tmp/tmp.vgIxg6VGyM + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.T16PoZHrbZ ++ mktemp + local LAST_ERR=/tmp/tmp.10BcZNpMfw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T16PoZHrbZ certificate.cert-manager.io/some-name-ssl created + cat /tmp/tmp.10BcZNpMfw Warning: spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. 
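
The repeated "Warning: spec.privateKey.rotationPolicy" messages are expected with cert-manager v1.19.1: since v1.18.0 the default rotation policy changed from `Never` to `Always`, and the warning fires because the test's Certificate manifests leave the field unset. The manifests themselves are not shown in this log; a hypothetical minimal Certificate that pins the field explicitly (names mirror the objects created above) would look like:

    kubectl apply -f - <<'EOF'
    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: some-name-ssl
    spec:
      secretName: some-name-ssl
      issuerRef:
        name: some-name-psmdb-issuer
        kind: Issuer
      commonName: some-name-ssl   # placeholder subject; the real manifest lists the cluster DNS names
      privateKey:
        rotationPolicy: Always    # set explicitly so the default-change warning goes away
    EOF
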
+ rm /tmp/tmp.T16PoZHrbZ /tmp/tmp.10BcZNpMfw + return 0 + deploy_cmctl + local service_account=cmctl + /usr/sbin/sed -e s/percona-server-mongodb-operator/cmctl/g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/rbac.yaml + yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.73sIBjcUOV ++ mktemp + local LAST_ERR=/tmp/tmp.1F5gBVAsmn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.73sIBjcUOV role.rbac.authorization.k8s.io/cmctl created serviceaccount/cmctl created rolebinding.rbac.authorization.k8s.io/service-account-cmctl created + cat /tmp/tmp.1F5gBVAsmn + rm /tmp/tmp.73sIBjcUOV /tmp/tmp.1F5gBVAsmn + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/conf/cmctl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.G8n4NJrZX1 ++ mktemp + local LAST_ERR=/tmp/tmp.0DYrlXE4lW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/conf/cmctl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G8n4NJrZX1 deployment.apps/cmctl created + cat /tmp/tmp.0DYrlXE4lW + rm /tmp/tmp.G8n4NJrZX1 /tmp/tmp.0DYrlXE4lW + return 0 + sleep 60 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2148-fce4e0a0"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.YXNfr5m3aP ++ mktemp + local LAST_ERR=/tmp/tmp.zFGRE35MUN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YXNfr5m3aP perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.zFGRE35MUN + rm /tmp/tmp.YXNfr5m3aP /tmp/tmp.zFGRE35MUN + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] 
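
apply_cluster, expanded a few steps above, never feeds the CR file to kubectl verbatim: cat_config pipes it through a chain of yq edits that pin every image to the build under test before `kubectl apply -f -` sees it. Condensed to its core, with $test_dir standing in for the full Jenkins workspace path (the real chain also sets the init, backup and PMM images):

    cat "$test_dir/conf/some-name.yml" \
        | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
        | yq eval '.spec.upgradeOptions.apply = "Never"' \
        | kubectl apply -f -
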
+ wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZvhWODSjQu +++ mktemp ++ local LAST_ERR=/tmp/tmp.1z11yD4PrN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZvhWODSjQu ++ cat /tmp/tmp.1z11yD4PrN ++ rm /tmp/tmp.ZvhWODSjQu /tmp/tmp.1z11yD4PrN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f0Cv1LZ6P1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.suP5B3YriT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f0Cv1LZ6P1 ++ cat /tmp/tmp.suP5B3YriT ++ rm /tmp/tmp.f0Cv1LZ6P1 /tmp/tmp.suP5B3YriT ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zdCcUoQ5nN +++ mktemp ++ local LAST_ERR=/tmp/tmp.eC2yLHoPNC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zdCcUoQ5nN ++ cat /tmp/tmp.eC2yLHoPNC ++ rm /tmp/tmp.zdCcUoQ5nN /tmp/tmp.eC2yLHoPNC ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......................... 
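
wait_for_running, whose trace appears above for rs0 and repeats below for the cfg and mongos sets, boils down to: wait for each pod of the replset, query the CR to see whether arbiter/nonvoting/hidden members are expected, then poll overall cluster readiness. A condensed sketch of the per-pod wait follows; this variant leans on `kubectl wait` and is an approximation, not the test's exact helper:

    wait_pod() {
        local pod=$1
        echo -n "waiting for pod/${pod} to be ready"
        # print a dot per attempt, as in the "waiting for pod/... to be ready.....OK" lines above
        until kubectl wait --for=condition=ready "pod/${pod}" --timeout=10s >/dev/null 2>&1; do
            echo -n .
        done
        echo OK
    }
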
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LyQcDuk4m8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EbB0JcHDvs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LyQcDuk4m8 ++ cat /tmp/tmp.EbB0JcHDvs ++ rm /tmp/tmp.LyQcDuk4m8 /tmp/tmp.EbB0JcHDvs ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vryWCWo7QP +++ mktemp ++ local LAST_ERR=/tmp/tmp.9SegPo3TEm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vryWCWo7QP ++ cat /tmp/tmp.9SegPo3TEm ++ rm /tmp/tmp.vryWCWo7QP /tmp/tmp.9SegPo3TEm ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.A5ZYTcP18J +++ mktemp ++ local LAST_ERR=/tmp/tmp.1VBdLlUfjD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.A5ZYTcP18J ++ cat /tmp/tmp.1VBdLlUfjD ++ rm /tmp/tmp.A5ZYTcP18J /tmp/tmp.1VBdLlUfjD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t1UgkadGsj +++ mktemp ++ local LAST_ERR=/tmp/tmp.ayD8kstwD6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
-n 1 ']' ++ break ++ cat /tmp/tmp.t1UgkadGsj ++ cat /tmp/tmp.ayD8kstwD6 ++ rm /tmp/tmp.t1UgkadGsj /tmp/tmp.ayD8kstwD6 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i5XVWMbWV8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.lgzRCsHBWE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i5XVWMbWV8 ++ cat /tmp/tmp.lgzRCsHBWE ++ rm /tmp/tmp.i5XVWMbWV8 /tmp/tmp.lgzRCsHBWE ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Og9SXPcipp +++ mktemp ++ local LAST_ERR=/tmp/tmp.8ZpnHJKwK6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Og9SXPcipp ++ cat /tmp/tmp.8ZpnHJKwK6 ++ rm /tmp/tmp.Og9SXPcipp /tmp/tmp.8ZpnHJKwK6 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'compare custom certificates and issuers' + set +o xtrace ----------------------------------------------------------------------------------- compare custom certificates and issuers ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl -custom + local resource=certificate/some-name-ssl + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml + local new_result=/tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.5OybEldo6V ++ mktemp + local LAST_ERR=/tmp/tmp.7Qa8qm4ziU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5OybEldo6V + cat /tmp/tmp.7Qa8qm4ziU + rm /tmp/tmp.5OybEldo6V /tmp/tmp.7Qa8qm4ziU + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + log 'compare_kubectl: certificate/some-name-ssl OK' + set +o xtrace [2025-12-17T12:45:35+0000] compare_kubectl: certificate/some-name-ssl OK + compare_kubectl certificate/some-name-ssl-internal -custom + local resource=certificate/some-name-ssl-internal + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml + local new_result=/tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. 
| select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.bjxwPmSjeu ++ mktemp + local LAST_ERR=/tmp/tmp.XjeuT44WQ9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bjxwPmSjeu + cat /tmp/tmp.XjeuT44WQ9 + rm /tmp/tmp.bjxwPmSjeu /tmp/tmp.XjeuT44WQ9 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + log 'compare_kubectl: certificate/some-name-ssl-internal OK' + set +o xtrace [2025-12-17T12:45:36+0000] compare_kubectl: certificate/some-name-ssl-internal OK + compare_kubectl certificate/some-name-ca-cert -custom + local resource=certificate/some-name-ca-cert + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml + local new_result=/tmp/tmp.pN37kVL2rr/certificate_some-name-ca-cert.yml + '[' -n '' -a -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ca-cert ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.OuL92Op7v7 ++ mktemp + local LAST_ERR=/tmp/tmp.RpnjfwvFZH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ca-cert + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OuL92Op7v7 + cat /tmp/tmp.RpnjfwvFZH + rm /tmp/tmp.OuL92Op7v7 /tmp/tmp.RpnjfwvFZH + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ca-cert.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ca-cert.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ca-cert.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml /tmp/tmp.pN37kVL2rr/certificate_some-name-ca-cert.yml + log 'compare_kubectl: certificate/some-name-ca-cert OK' + set +o xtrace [2025-12-17T12:45:37+0000] compare_kubectl: certificate/some-name-ca-cert OK + compare_kubectl issuer/some-name-psmdb-ca-issuer -custom + local resource=issuer/some-name-psmdb-ca-issuer + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml + local new_result=/tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ZnuDp7KPbg ++ mktemp + local LAST_ERR=/tmp/tmp.dWiW42T8dD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZnuDp7KPbg + cat /tmp/tmp.dWiW42T8dD + rm /tmp/tmp.ZnuDp7KPbg /tmp/tmp.dWiW42T8dD + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-ca-issuer OK' + set +o xtrace [2025-12-17T12:45:38+0000] compare_kubectl: issuer/some-name-psmdb-ca-issuer OK + compare_kubectl issuer/some-name-psmdb-issuer -custom + local resource=issuer/some-name-psmdb-issuer + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml + local new_result=/tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.YBW4Ohv8Hs ++ mktemp + local LAST_ERR=/tmp/tmp.ZaPOftxeRV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YBW4Ohv8Hs + cat /tmp/tmp.ZaPOftxeRV + rm /tmp/tmp.YBW4Ohv8Hs /tmp/tmp.ZaPOftxeRV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-issuer OK' + set +o xtrace [2025-12-17T12:45:40+0000] compare_kubectl: issuer/some-name-psmdb-issuer OK + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl delete psmdb --all perconaservermongodb.psmdb.percona.com "some-name" deleted from tls-issue-cert-manager-18440 namespace + wait_for_delete psmdb/some-name 180 + local res=psmdb/some-name + local wait_time=180 + set +o xtrace waiting for psmdb/some-name to be deletedError from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com 
"some-name" not found + kubectl delete pvc --all persistentvolumeclaim "mongod-data-some-name-cfg-0" deleted from tls-issue-cert-manager-18440 namespace persistentvolumeclaim "mongod-data-some-name-cfg-1" deleted from tls-issue-cert-manager-18440 namespace persistentvolumeclaim "mongod-data-some-name-cfg-2" deleted from tls-issue-cert-manager-18440 namespace persistentvolumeclaim "mongod-data-some-name-rs0-0" deleted from tls-issue-cert-manager-18440 namespace persistentvolumeclaim "mongod-data-some-name-rs0-1" deleted from tls-issue-cert-manager-18440 namespace persistentvolumeclaim "mongod-data-some-name-rs0-2" deleted from tls-issue-cert-manager-18440 namespace + desc 'delete custom cert-manager issuers and certificates' + set +o xtrace ----------------------------------------------------------------------------------- delete custom cert-manager issuers and certificates ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.p3PPihWbsp ++ mktemp + local LAST_ERR=/tmp/tmp.IHKSvdUj9q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p3PPihWbsp issuer.cert-manager.io "some-name-psmdb-ca-issuer" deleted from tls-issue-cert-manager-18440 namespace + cat /tmp/tmp.IHKSvdUj9q + rm /tmp/tmp.p3PPihWbsp /tmp/tmp.IHKSvdUj9q + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.gIZjq1lTD9 ++ mktemp + local LAST_ERR=/tmp/tmp.KDObR3tPWy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gIZjq1lTD9 issuer.cert-manager.io "some-name-psmdb-issuer" deleted from tls-issue-cert-manager-18440 namespace + cat /tmp/tmp.KDObR3tPWy + rm /tmp/tmp.gIZjq1lTD9 /tmp/tmp.KDObR3tPWy + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml ++ mktemp + local LAST_OUT=/tmp/tmp.rdUpSPiQKx ++ mktemp + local LAST_ERR=/tmp/tmp.6eP9k0j3MM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rdUpSPiQKx certificate.cert-manager.io "some-name-ca-cert" deleted from tls-issue-cert-manager-18440 namespace + cat /tmp/tmp.6eP9k0j3MM + rm /tmp/tmp.rdUpSPiQKx /tmp/tmp.6eP9k0j3MM + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ardsY4qs57 ++ mktemp + local LAST_ERR=/tmp/tmp.gE8vjHq8Qj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ardsY4qs57 certificate.cert-manager.io "some-name-ssl-internal" deleted from tls-issue-cert-manager-18440 namespace + cat /tmp/tmp.gE8vjHq8Qj + rm /tmp/tmp.ardsY4qs57 /tmp/tmp.gE8vjHq8Qj + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.WBM9TgG27u ++ mktemp + local LAST_ERR=/tmp/tmp.cEcGXeB2pM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WBM9TgG27u certificate.cert-manager.io "some-name-ssl" deleted from tls-issue-cert-manager-18440 namespace + cat /tmp/tmp.cEcGXeB2pM + rm /tmp/tmp.WBM9TgG27u /tmp/tmp.cEcGXeB2pM + return 0 + sleep 30 + desc 'delete ssl secrets, operator should recreate them' + set +o xtrace ----------------------------------------------------------------------------------- delete ssl secrets, operator should recreate them ----------------------------------------------------------------------------------- + kubectl_bin delete secret some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.VvShRYQE5K ++ mktemp + local LAST_ERR=/tmp/tmp.KNsAIvhmrK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete secret some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VvShRYQE5K secret "some-name-ssl-internal" deleted from tls-issue-cert-manager-18440 namespace + cat /tmp/tmp.KNsAIvhmrK + rm /tmp/tmp.VvShRYQE5K /tmp/tmp.KNsAIvhmrK + return 0 + kubectl_bin delete secret some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.m4N8E3Wukm ++ mktemp + local LAST_ERR=/tmp/tmp.nCzhPOndaj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete secret some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m4N8E3Wukm secret "some-name-ssl" deleted from tls-issue-cert-manager-18440 namespace + cat /tmp/tmp.nCzhPOndaj + rm /tmp/tmp.m4N8E3Wukm /tmp/tmp.nCzhPOndaj + return 0 + sleep 30 + desc 'recreate PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- recreate PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' ++ mktemp + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2148-fce4e0a0"' + yq eval 
'.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.Cu5VMJf2nG ++ mktemp + local LAST_ERR=/tmp/tmp.z5q37QVzx9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Cu5VMJf2nG perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.z5q37QVzx9 + rm /tmp/tmp.Cu5VMJf2nG /tmp/tmp.z5q37QVzx9 + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ro2mHfSrhy +++ mktemp ++ local LAST_ERR=/tmp/tmp.jfi5fCWlYy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ro2mHfSrhy ++ cat /tmp/tmp.jfi5fCWlYy ++ rm /tmp/tmp.ro2mHfSrhy /tmp/tmp.jfi5fCWlYy ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SnuhO1B5C7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mqwL6DIikc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SnuhO1B5C7 ++ cat /tmp/tmp.mqwL6DIikc ++ rm /tmp/tmp.SnuhO1B5C7 /tmp/tmp.mqwL6DIikc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L3kEKPrEyy +++ mktemp ++ local LAST_ERR=/tmp/tmp.W7pdliCNRH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L3kEKPrEyy ++ cat /tmp/tmp.W7pdliCNRH ++ rm /tmp/tmp.L3kEKPrEyy /tmp/tmp.W7pdliCNRH ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................... 
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Iy44FXzXUE +++ mktemp ++ local LAST_ERR=/tmp/tmp.9vtzF7klyS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Iy44FXzXUE ++ cat /tmp/tmp.9vtzF7klyS ++ rm /tmp/tmp.Iy44FXzXUE /tmp/tmp.9vtzF7klyS ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NPuqtlMFqv +++ mktemp ++ local LAST_ERR=/tmp/tmp.2omCgmc5Mv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NPuqtlMFqv ++ cat /tmp/tmp.2omCgmc5Mv ++ rm /tmp/tmp.NPuqtlMFqv /tmp/tmp.2omCgmc5Mv ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oDAstn9hho +++ mktemp ++ local LAST_ERR=/tmp/tmp.wTIm125VN9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oDAstn9hho ++ cat /tmp/tmp.wTIm125VN9 ++ rm /tmp/tmp.oDAstn9hho /tmp/tmp.wTIm125VN9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RFh43E83lA +++ mktemp ++ local LAST_ERR=/tmp/tmp.7s2MEJyKcF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
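wait_for_running waits on each pod of the set in turn and then inspects the CR to decide whether extra members (arbiter, nonvoting, hidden) also need to be awaited, using a filtered jsonpath query; an empty result, as seen throughout this log, means the field is unset. Equivalent stand-alone commands, assuming kubectl access to the same namespace (the 300s timeout is an assumption, not from this log):

    # Block until a pod reports Ready (the harness prints dots while polling)
    kubectl wait --for=condition=Ready pod/some-name-cfg-0 --timeout=300s

    # Is an arbiter enabled for the cfg replset? Prints nothing when unset.
    kubectl get psmdb some-name \
        -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
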
-n 1 ']' ++ break ++ cat /tmp/tmp.RFh43E83lA ++ cat /tmp/tmp.7s2MEJyKcF ++ rm /tmp/tmp.RFh43E83lA /tmp/tmp.7s2MEJyKcF ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mTmDf0sKRI +++ mktemp ++ local LAST_ERR=/tmp/tmp.PA9LdcyuFr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mTmDf0sKRI ++ cat /tmp/tmp.PA9LdcyuFr ++ rm /tmp/tmp.mTmDf0sKRI /tmp/tmp.PA9LdcyuFr ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UENZHEkrJM +++ mktemp ++ local LAST_ERR=/tmp/tmp.aOzavwntCl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UENZHEkrJM ++ cat /tmp/tmp.aOzavwntCl ++ rm /tmp/tmp.UENZHEkrJM /tmp/tmp.aOzavwntCl ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vGf5B9Zpag ++ mktemp + local LAST_ERR=/tmp/tmp.4gcSS3zZWK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vGf5B9Zpag + cat /tmp/tmp.4gcSS3zZWK + rm /tmp/tmp.vGf5B9Zpag /tmp/tmp.4gcSS3zZWK + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-17T12:50:25+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | ++ mktemp del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.tGeoUt8cnx ++ mktemp + local LAST_ERR=/tmp/tmp.gppOC8IA3Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tGeoUt8cnx + cat /tmp/tmp.gppOC8IA3Y + rm /tmp/tmp.tGeoUt8cnx /tmp/tmp.gppOC8IA3Y + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-17T12:50:26+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.nwIBnB5kBG ++ mktemp + local LAST_ERR=/tmp/tmp.J8BmapXYtI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nwIBnB5kBG + cat /tmp/tmp.J8BmapXYtI + rm /tmp/tmp.nwIBnB5kBG /tmp/tmp.J8BmapXYtI + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-12-17T12:50:27+0000] compare_kubectl: statefulset/some-name-mongos OK + desc 'check if certificates issued with certmanager' + set +o xtrace ----------------------------------------------------------------------------------- check if certificates issued with certmanager ----------------------------------------------------------------------------------- + check_tls_secret some-name-ssl + local secret_name=some-name-ssl + check_secret_data_key some-name-ssl ca.crt + local secret_name=some-name-ssl + local data_key=ca.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["ca.crt"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.R6EmyWPFUJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.mcaaoNcxlA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.R6EmyWPFUJ ++ cat /tmp/tmp.mcaaoNcxlA ++ rm /tmp/tmp.R6EmyWPFUJ /tmp/tmp.mcaaoNcxlA ++ return 0 + 
secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lVUmw5Tk1zb3V4WHJVdldhbDhFbjRUa0s0dDc4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEl4TnpFeU5ERXlOVm9YRFRJMgpNVEl4TnpFeU5ERXlOVm93RnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNSUlCSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEvUHppaWVMeUJyQzMxWUpzU0NvV0d6K2hGd0R2Um5qQjJ6SWEKTVVaK3hvbWxtUjRKZTV0OEdXdk9QN0J2U2FnZmtBUHl2ZFFOVjY3L2JZMWd5MnZNekpzek1oeWs1bGNJS1IveQpDcjZ2Z2Rmbjg4VzNmS1k1alBwOTZKRFNDWXBLdTlvb2tpLzFYYks0R3d1WlZxak4xSW1OYTdzRUVWbkhLbUlxClF2UHdsNVpLZ1pXenB5UmUzNUc4SjhRWm0wanNwMVdnUXBlZ0JQaVJHYkU4QlUrTkx1QStzdWVoMmhSVXlqY1EKOExuS3hpbTVzanloSmgvSlh5dW5HUnRnZVQrNEl2aHNXcGRKMDk1R04rZW9LNlE2OHdnZlN3bkU4c2syU0c3TApKa0Rub0ZJaFcrODlJckwyOG5qSlVoTm1tVTRoY08rYXVUNGpuRGVqT3BoenpIM3h5d0lEQVFBQm8wSXdRREFPCkJnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVME5CdHhrK00KdUlGZmJvaktJamQ4NTFRTmo2UXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRy9aT0Q5d0NQdzVZMWVNazh6dgozQnVrdXBNbTV0eFluK3doL2lDR21Nb3duaVJEMStmN3MvTUdIRndqRm8rRUZ6T2pUUWo1SnJKdDNaSFcxdnhXCmoxQ2tiRVA1UGkvS2FGTzdEbDMzT2gzbVE1ZGVIekxnN0FwME15M1I5aFI0SkJpYTFQYlRmUTMxV2FZZ24rN20KNkw5T2E3R2NwT1pJR2RyYTFxbjcyV3c3dURiTkhZV3V3QjhaSDN1dktNYXFxNjRuUVB6SUdkZ2ZsRWcyUzJXQgp0V3VrODk4eDFGREVuTGx0dDllVXNaNEZrVGF2QlFlR2lkY1p6TTdYM0NvVUU1RVJDRHUrVnJZODBSRHRXVWZaCnNMcHIzTTRHaG93aHI1SEE3ekNKZXN2ZDRXR3JvSFhXbHhrM1F6eE9iZ0k2bXR5SlF1c3U3ME5qVUVYOTFoNUYKMlhnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lVUmw5Tk1zb3V4WHJVdldhbDhFbjRUa0s0dDc4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEl4TnpFeU5ERXlOVm9YRFRJMgpNVEl4TnpFeU5ERXlOVm93RnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNSUlCSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEvUHppaWVMeUJyQzMxWUpzU0NvV0d6K2hGd0R2Um5qQjJ6SWEKTVVaK3hvbWxtUjRKZTV0OEdXdk9QN0J2U2FnZmtBUHl2ZFFOVjY3L2JZMWd5MnZNekpzek1oeWs1bGNJS1IveQpDcjZ2Z2Rmbjg4VzNmS1k1alBwOTZKRFNDWXBLdTlvb2tpLzFYYks0R3d1WlZxak4xSW1OYTdzRUVWbkhLbUlxClF2UHdsNVpLZ1pXenB5UmUzNUc4SjhRWm0wanNwMVdnUXBlZ0JQaVJHYkU4QlUrTkx1QStzdWVoMmhSVXlqY1EKOExuS3hpbTVzanloSmgvSlh5dW5HUnRnZVQrNEl2aHNXcGRKMDk1R04rZW9LNlE2OHdnZlN3bkU4c2syU0c3TApKa0Rub0ZJaFcrODlJckwyOG5qSlVoTm1tVTRoY08rYXVUNGpuRGVqT3BoenpIM3h5d0lEQVFBQm8wSXdRREFPCkJnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVME5CdHhrK00KdUlGZmJvaktJamQ4NTFRTmo2UXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRy9aT0Q5d0NQdzVZMWVNazh6dgozQnVrdXBNbTV0eFluK3doL2lDR21Nb3duaVJEMStmN3MvTUdIRndqRm8rRUZ6T2pUUWo1SnJKdDNaSFcxdnhXCmoxQ2tiRVA1UGkvS2FGTzdEbDMzT2gzbVE1ZGVIekxnN0FwME15M1I5aFI0SkJpYTFQYlRmUTMxV2FZZ24rN20KNkw5T2E3R2NwT1pJR2RyYTFxbjcyV3c3dURiTkhZV3V3QjhaSDN1dktNYXFxNjRuUVB6SUdkZ2ZsRWcyUzJXQgp0V3VrODk4eDFGREVuTGx0dDllVXNaNEZrVGF2QlFlR2lkY1p6TTdYM0NvVUU1RVJDRHUrVnJZODBSRHRXVWZaCnNMcHIzTTRHaG93aHI1SEE3ekNKZXN2ZDRXR3JvSFhXbHhrM1F6eE9iZ0k2bXR5SlF1c3U3ME5qVUVYOTFoNUYKMlhnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="' ']' + check_secret_data_key some-name-ssl tls.crt + local secret_name=some-name-ssl + local data_key=tls.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["tls.crt"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MrYDBcqs8a +++ mktemp ++ local LAST_ERR=/tmp/tmp.GrCcOJlwgd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MrYDBcqs8a ++ cat /tmp/tmp.GrCcOJlwgd ++ rm /tmp/tmp.MrYDBcqs8a 
/tmp/tmp.GrCcOJlwgd ++ return 0 + secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg2ekNDQnRPZ0F3SUJBZ0lVUVFYcmF5aTBaVGZORWxGb2VOYytaMW04NTVFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEl4TnpFeU5EY3hOVm9YRFRJMgpNRE14TnpFeU5EY3hOVm93SkRFT01Bd0dBMVVFQ2hNRlVGTk5SRUl4RWpBUUJnTlZCQU1UQ1hOdmJXVXRibUZ0ClpUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtySVhNWVZBVG5YUXVtUEcxYmkKd25qTXdEMWFlTnN4VStuVUFFN3E3K1B5b0RPVDYrdDRhTTNBeTRpODFZYVlMUVBtTHc1S0cwWHMyWWQzVFRXTwppWnA3YnZEMy9aZDg5NUtvTnVsRndnSEs4akNMTC93TWM5UnZ5ZDRkSEFsaFhWZi93Z0dmbmZFRVJOTU1RRDE0CjJPNGJmV2NXZFFqZnY3NHhBTFI2TUxtbUpacFZ6MG9IUHBWeWVBMlcxNUE2VFFubDBuVk1SRCtSUG1taGwrc0QKMjE3ZXdWMEh6Vm1OV1VtZmIxc2o1dk1WTFJxdm5aOGs4TlRkdnF1RzdseURRcEo1bm1pMmdNbndrSU1XVjU1RgpDcHdrSElhQVovOHVJMnB4VWpFLzlIMkh3dVJCc1ZEVWVjbW5ZckczRkcza3Zxa0hsUjA1Z2F2d0hzNGJYODh5CllUY0NBd0VBQWFPQ0JTQXdnZ1VjTUE0R0ExVWREd0VCL3dRRUF3SUZvREFNQmdOVkhSTUJBZjhFQWpBQU1COEcKQTFVZEl3UVlNQmFBRk5EUWJjWlBqTGlCWDI2SXlpSTNmT2RVRFkra01JSUUyUVlEVlIwUkJJSUUwRENDQk15QwpDV3h2WTJGc2FHOXpkSUlOYzI5dFpTMXVZVzFsTFhKek1JSXFjMjl0WlMxdVlXMWxMWEp6TUM1MGJITXRhWE56CmRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFNE5EUXdnanh6YjIxbExXNWhiV1V0Y25Nd0xuUnNjeTFwYzNOMVpTMWoKWlhKMExXMWhibUZuWlhJdE1UZzBOREF1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDRHlvdWMyOXRaUzF1WVcxbApMWEp6TUlJc0tpNXpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVGcwCk5EQ0NQaW91YzI5dFpTMXVZVzFsTFhKek1DNTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFNE5EUXcKTG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2o5emIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqWlhKMApMVzFoYm1GblpYSXRNVGcwTkRBdWMzWmpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsCkxYSnpNQzUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTRORFF3TG5OMll5NWpiSFZ6ZEdWeWMyVjAKTG14dlkyRnNnak1xTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVGcwTkRBdWMzWmpMbU5zZFhOMApaWEp6WlhRdWJHOWpZV3lDRUhOdmJXVXRibUZ0WlMxdGIyNW5iM09DTFhOdmJXVXRibUZ0WlMxdGIyNW5iM011CmRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4T0RRME1JSS9jMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTAKYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTRORFF3TG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2hJcQpMbk52YldVdGJtRnRaUzF0YjI1bmIzT0NMeW91YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEU0TkRRd2drRXFMbk52YldVdGJtRnRaUzF0YjI1bmIzTXVkR3h6TFdsemMzVmwKTFdObGNuUXRiV0Z1WVdkbGNpMHhPRFEwTUM1emRtTXVZMngxYzNSbGNpNXNiMk5oYklJTmMyOXRaUzF1WVcxbApMV05tWjRJcWMyOXRaUzF1WVcxbExXTm1aeTUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTRORFF3CmdqeHpiMjFsTFc1aGJXVXRZMlpuTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVGcwTkRBdWMzWmoKTG1Oc2RYTjBaWEl1Ykc5allXeUNEeW91YzI5dFpTMXVZVzFsTFdObVo0SXNLaTV6YjIxbExXNWhiV1V0WTJabgpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UZzBORENDUGlvdWMyOXRaUzF1WVcxbExXTm1aeTUwCmJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEU0TkRRd0xuTjJZeTVqYkhWemRHVnlMbXh2WTJGc2drSnoKYjIxbExXNWhiV1V0Ylc5dVoyOXpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UZzBOREF1YzNaagpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNSQ291YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEU0TkRRd0xuTjJZeTVqYkhWemRHVnljMlYwTG14dlkyRnNnajl6YjIxbExXNWgKYldVdFkyWm5MblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UZzBOREF1YzNaakxtTnNkWE4wWlhKegpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsTFdObVp5NTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5CkxURTRORFF3TG5OMll5NWpiSFZ6ZEdWeWMyVjBMbXh2WTJGc01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQmsKZTQ3TDF0LzV3Qm5QbFhVSFMvMHFiSENGQ1hoN3hhTndkOXpNTVVGNFljakVxQ0V4cmp6NXFpL2l4Q2REQzBZNgpobW1VNGxhNHVkN2dsd04xQmpZd0MxL2pNZmxwVTBWMVRPcVhjdTN6bDFCLytyWE03NnQxbHJvNEViYzRobVZsCktZWnFKbWVxUmE0U081SlA3RFd2eGsvYUlpNm1uR3NwMEI0Z1M0dVRoM0s2MXd4SWZEUHNyNnJFMlEzY1RrSzkKM3V
MRU9QZmJuaUJtb2hLaFJjdXdPSHFicXBvL0VZRlc1NXBBVnJlWmNkMTZNNUlxWVlBS0JpNE1Ra2xWOHVOaQo4VXFYN2hQeW5rR2cwMDRPT3E3a2NzVWlnQUJDeWxmNTQyV09YYWx1WnFXR0V1Si9laXhCdVRwc05ySUgyU3lsCndidFRqaUhZUFNOdklDWXdNY08rCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg2ekNDQnRPZ0F3SUJBZ0lVUVFYcmF5aTBaVGZORWxGb2VOYytaMW04NTVFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEl4TnpFeU5EY3hOVm9YRFRJMgpNRE14TnpFeU5EY3hOVm93SkRFT01Bd0dBMVVFQ2hNRlVGTk5SRUl4RWpBUUJnTlZCQU1UQ1hOdmJXVXRibUZ0ClpUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtySVhNWVZBVG5YUXVtUEcxYmkKd25qTXdEMWFlTnN4VStuVUFFN3E3K1B5b0RPVDYrdDRhTTNBeTRpODFZYVlMUVBtTHc1S0cwWHMyWWQzVFRXTwppWnA3YnZEMy9aZDg5NUtvTnVsRndnSEs4akNMTC93TWM5UnZ5ZDRkSEFsaFhWZi93Z0dmbmZFRVJOTU1RRDE0CjJPNGJmV2NXZFFqZnY3NHhBTFI2TUxtbUpacFZ6MG9IUHBWeWVBMlcxNUE2VFFubDBuVk1SRCtSUG1taGwrc0QKMjE3ZXdWMEh6Vm1OV1VtZmIxc2o1dk1WTFJxdm5aOGs4TlRkdnF1RzdseURRcEo1bm1pMmdNbndrSU1XVjU1RgpDcHdrSElhQVovOHVJMnB4VWpFLzlIMkh3dVJCc1ZEVWVjbW5ZckczRkcza3Zxa0hsUjA1Z2F2d0hzNGJYODh5CllUY0NBd0VBQWFPQ0JTQXdnZ1VjTUE0R0ExVWREd0VCL3dRRUF3SUZvREFNQmdOVkhSTUJBZjhFQWpBQU1COEcKQTFVZEl3UVlNQmFBRk5EUWJjWlBqTGlCWDI2SXlpSTNmT2RVRFkra01JSUUyUVlEVlIwUkJJSUUwRENDQk15QwpDV3h2WTJGc2FHOXpkSUlOYzI5dFpTMXVZVzFsTFhKek1JSXFjMjl0WlMxdVlXMWxMWEp6TUM1MGJITXRhWE56CmRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFNE5EUXdnanh6YjIxbExXNWhiV1V0Y25Nd0xuUnNjeTFwYzNOMVpTMWoKWlhKMExXMWhibUZuWlhJdE1UZzBOREF1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDRHlvdWMyOXRaUzF1WVcxbApMWEp6TUlJc0tpNXpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVGcwCk5EQ0NQaW91YzI5dFpTMXVZVzFsTFhKek1DNTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFNE5EUXcKTG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2o5emIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqWlhKMApMVzFoYm1GblpYSXRNVGcwTkRBdWMzWmpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsCkxYSnpNQzUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTRORFF3TG5OMll5NWpiSFZ6ZEdWeWMyVjAKTG14dlkyRnNnak1xTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVGcwTkRBdWMzWmpMbU5zZFhOMApaWEp6WlhRdWJHOWpZV3lDRUhOdmJXVXRibUZ0WlMxdGIyNW5iM09DTFhOdmJXVXRibUZ0WlMxdGIyNW5iM011CmRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4T0RRME1JSS9jMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTAKYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTRORFF3TG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2hJcQpMbk52YldVdGJtRnRaUzF0YjI1bmIzT0NMeW91YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEU0TkRRd2drRXFMbk52YldVdGJtRnRaUzF0YjI1bmIzTXVkR3h6TFdsemMzVmwKTFdObGNuUXRiV0Z1WVdkbGNpMHhPRFEwTUM1emRtTXVZMngxYzNSbGNpNXNiMk5oYklJTmMyOXRaUzF1WVcxbApMV05tWjRJcWMyOXRaUzF1WVcxbExXTm1aeTUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTRORFF3CmdqeHpiMjFsTFc1aGJXVXRZMlpuTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVGcwTkRBdWMzWmoKTG1Oc2RYTjBaWEl1Ykc5allXeUNEeW91YzI5dFpTMXVZVzFsTFdObVo0SXNLaTV6YjIxbExXNWhiV1V0WTJabgpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UZzBORENDUGlvdWMyOXRaUzF1WVcxbExXTm1aeTUwCmJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEU0TkRRd0xuTjJZeTVqYkhWemRHVnlMbXh2WTJGc2drSnoKYjIxbExXNWhiV1V0Ylc5dVoyOXpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UZzBOREF1YzNaagpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNSQ291YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEU0TkRRd0xuTjJZeTVqYkhWemRHVnljMlYwTG14dlkyRnNnajl6YjIxbExXNWgKYldVdFkyWm5MblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UZzBOREF1YzNaakxtTnNkWE4wWlhKegpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsTFdObVp5NTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5CkxURTRORFF3TG5OMll5NWpiSFZ6ZEdWeWMyVjBMbXh2WTJGc01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQmsKZTQ3TDF0LzV3Qm5QbFhVSFMvMHFiSENGQ1hoN3hhTndkOXpNTVVGNFljakVxQ0V4
cmp6NXFpL2l4Q2REQzBZNgpobW1VNGxhNHVkN2dsd04xQmpZd0MxL2pNZmxwVTBWMVRPcVhjdTN6bDFCLytyWE03NnQxbHJvNEViYzRobVZsCktZWnFKbWVxUmE0U081SlA3RFd2eGsvYUlpNm1uR3NwMEI0Z1M0dVRoM0s2MXd4SWZEUHNyNnJFMlEzY1RrSzkKM3VMRU9QZmJuaUJtb2hLaFJjdXdPSHFicXBvL0VZRlc1NXBBVnJlWmNkMTZNNUlxWVlBS0JpNE1Ra2xWOHVOaQo4VXFYN2hQeW5rR2cwMDRPT3E3a2NzVWlnQUJDeWxmNTQyV09YYWx1WnFXR0V1Si9laXhCdVRwc05ySUgyU3lsCndidFRqaUhZUFNOdklDWXdNY08rCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"' ']' + check_secret_data_key some-name-ssl tls.key + local secret_name=some-name-ssl + local data_key=tls.key + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["tls.key"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fRwkZzpavH +++ mktemp ++ local LAST_ERR=/tmp/tmp.WGwop6D5v8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fRwkZzpavH ++ cat /tmp/tmp.WGwop6D5v8 ++ rm /tmp/tmp.fRwkZzpavH /tmp/tmp.WGwop6D5v8 ++ return 0 + secret_data='"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcXNoY3hoVUJPZGRDNlk4YlZ1TENlTXpBUFZwNDJ6RlQ2ZFFBVHVydjQvS2dNNVByCjYzaG96Y0RMaUx6VmhwZ3RBK1l2RGtvYlJlelpoM2ROTlk2Sm1udHU4UGY5bDN6M2txZzI2VVhDQWNyeU1Jc3YKL0F4ejFHL0ozaDBjQ1dGZFYvL0NBWitkOFFSRTB3eEFQWGpZN2h0OVp4WjFDTisvdmpFQXRIb3d1YVlsbWxYUApTZ2MrbFhKNERaYlhrRHBOQ2VYU2RVeEVQNUUrYWFHWDZ3UGJYdDdCWFFmTldZMVpTWjl2V3lQbTh4VXRHcStkCm55VHcxTjIrcTRidVhJTkNrbm1lYUxhQXlmQ1FneFpYbmtVS25DUWNob0JuL3k0amFuRlNNVC8wZllmQzVFR3gKVU5SNXlhZGlzYmNVYmVTK3FRZVZIVG1CcS9BZXpodGZ6ekpoTndJREFRQUJBb0lCQURpVW5ndndMSkpOcTNqMgp0UStSbSsxcXBHd1BPOGdlN1Jha2FwKzZ2VlJ2Ynp6M1RtdWFYbHFBZTg0bkJiVU1XS21ZaXRGNG90V0ZWVUpJCnQ3YzlMSUNXVVZpYlNKUjJUZzEwRnBFdFQ4dkpRWGpzdkM2TXI1bGJtZTB4QnJkaTN3cUhETzc5dzFWMDNScHcKemgydm1tczBoMVR1ZjBLNzRtb3dFUFE2SVVkMzVlT0xJeGwvZ3MyRDhzNkFhZVAwWEtyQXpVUUpqRkdrajZSNgpSYVlpc2VDS1R4TVZSWExyZmFqMHJaWEJic2VOT3lNREVSQ05VT0c5Ylkza2NIU1ZrcW8wRytiS1E2bzgxMUdLCmREUXRIMnBsc2dMbnl0UGJpbXRQdTdyNG8yZWlXc040cENmWWtKVEEydnlBWEJ2T3M5QWlqK3g0MkVFRnk5LzYKUGdKajk5MENnWUVBM2hUS2xqckJDaDJFK0V4Z0lRVmtiK0luWWpJS21IYVB4TTFBbGxpd3BOYmtHdmhuVTBxSApyRDJsaGVNdlkzRm9vcHVNZXdKdlBXRHhCRVZ2dWtUNUwrUFhFZnRiMzFSejNwbGx1WGJ3RXBkbVdrQkpncDA4CktNTnhMaG5rK1BQOFRnTlpDN0hFMjF4Z2haMW8xV2kyM1VDZm8zQWZyTlJObHlxTjNjMElGV01DZ1lFQXhOM1cKRmFuRTNidFVxdUtBWG9XM0l2V082VGtIejZaQ3Y5MGZDd0xMd2lPWDA3ZUE4dUtJR2RUNXlDVEhmRThFSjFBRApxK2VSbG0rV2pGMTR6bnRNUnIxWEhqR1FEME5vT0Y5WmREeXhQV0FsTlpWb1FvWjhRUkVOeFM2QWFDcFNjM0swClltTHdYb0JRTDkrZHNGcVk2YWc4TFY3cjFBM09NKzNzN1pVNHh4MENnWUE5NTFYQklvZUVwazNXV0pQRElKbXEKaGtPc0gzN01tTi9pWDhSTkpGTzQzeDJlVnZlV1JBUWxndG1NeEpNRkNTdTloOVpVdzR6MmN2czcwWThmUVBLawo0c0draFloS2JHUVAvSHBhaEVqUEpkeWw4NTJLQndJcDZyQzF4UW9LU3hoZE5wbXZwTkhXdzUwMElYaURaRGowCjIzcWtPRERjSUNHd3JXLzJENXpkT1FLQmdIdHFTclRIK2llN01hZnpXT0JvMlZkc004eFh1V0xlNndlVzJzZ3YKVjRkOGdzanlTRDk3N2pDcmlHK0ZSeDNSOTF1aDJMdGk3NjBzaFdlM2pXMThkR1FBZk9YcjBsRVBRLzNiSUp3egpGRFhTMldEUEtrNVNPd0ViYk5PSmhHeEk0K21HSmpscnB4N0hOLzNiSk9NUzJMdmFnT2M4azRFYnFvSjVqTWVnCmZiaXRBb0dCQUx2eUF5L0REZndoc0xqMnZZMlhDVEZPaVRwOURjdUlXL1p2NTRpQVJLTnllNExybHJmTEp1M04KZTVDRFlDeEdTaXBQQ1V3V3JOaFhBVGRPUUYwTGJyWEVnMElsa1p2bjVFVm1UZjVvMit5bUh2Q2I2NUY2VE1wdgp4SjFscFg5eVREVVh2ckpHcG44MzN2dE1SNkZBaEwvRlZrTjlDQS85K2tpMG9FRkxqR0t5Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="' + '[' -z 
'"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcXNoY3hoVUJPZGRDNlk4YlZ1TENlTXpBUFZwNDJ6RlQ2ZFFBVHVydjQvS2dNNVByCjYzaG96Y0RMaUx6VmhwZ3RBK1l2RGtvYlJlelpoM2ROTlk2Sm1udHU4UGY5bDN6M2txZzI2VVhDQWNyeU1Jc3YKL0F4ejFHL0ozaDBjQ1dGZFYvL0NBWitkOFFSRTB3eEFQWGpZN2h0OVp4WjFDTisvdmpFQXRIb3d1YVlsbWxYUApTZ2MrbFhKNERaYlhrRHBOQ2VYU2RVeEVQNUUrYWFHWDZ3UGJYdDdCWFFmTldZMVpTWjl2V3lQbTh4VXRHcStkCm55VHcxTjIrcTRidVhJTkNrbm1lYUxhQXlmQ1FneFpYbmtVS25DUWNob0JuL3k0amFuRlNNVC8wZllmQzVFR3gKVU5SNXlhZGlzYmNVYmVTK3FRZVZIVG1CcS9BZXpodGZ6ekpoTndJREFRQUJBb0lCQURpVW5ndndMSkpOcTNqMgp0UStSbSsxcXBHd1BPOGdlN1Jha2FwKzZ2VlJ2Ynp6M1RtdWFYbHFBZTg0bkJiVU1XS21ZaXRGNG90V0ZWVUpJCnQ3YzlMSUNXVVZpYlNKUjJUZzEwRnBFdFQ4dkpRWGpzdkM2TXI1bGJtZTB4QnJkaTN3cUhETzc5dzFWMDNScHcKemgydm1tczBoMVR1ZjBLNzRtb3dFUFE2SVVkMzVlT0xJeGwvZ3MyRDhzNkFhZVAwWEtyQXpVUUpqRkdrajZSNgpSYVlpc2VDS1R4TVZSWExyZmFqMHJaWEJic2VOT3lNREVSQ05VT0c5Ylkza2NIU1ZrcW8wRytiS1E2bzgxMUdLCmREUXRIMnBsc2dMbnl0UGJpbXRQdTdyNG8yZWlXc040cENmWWtKVEEydnlBWEJ2T3M5QWlqK3g0MkVFRnk5LzYKUGdKajk5MENnWUVBM2hUS2xqckJDaDJFK0V4Z0lRVmtiK0luWWpJS21IYVB4TTFBbGxpd3BOYmtHdmhuVTBxSApyRDJsaGVNdlkzRm9vcHVNZXdKdlBXRHhCRVZ2dWtUNUwrUFhFZnRiMzFSejNwbGx1WGJ3RXBkbVdrQkpncDA4CktNTnhMaG5rK1BQOFRnTlpDN0hFMjF4Z2haMW8xV2kyM1VDZm8zQWZyTlJObHlxTjNjMElGV01DZ1lFQXhOM1cKRmFuRTNidFVxdUtBWG9XM0l2V082VGtIejZaQ3Y5MGZDd0xMd2lPWDA3ZUE4dUtJR2RUNXlDVEhmRThFSjFBRApxK2VSbG0rV2pGMTR6bnRNUnIxWEhqR1FEME5vT0Y5WmREeXhQV0FsTlpWb1FvWjhRUkVOeFM2QWFDcFNjM0swClltTHdYb0JRTDkrZHNGcVk2YWc4TFY3cjFBM09NKzNzN1pVNHh4MENnWUE5NTFYQklvZUVwazNXV0pQRElKbXEKaGtPc0gzN01tTi9pWDhSTkpGTzQzeDJlVnZlV1JBUWxndG1NeEpNRkNTdTloOVpVdzR6MmN2czcwWThmUVBLawo0c0draFloS2JHUVAvSHBhaEVqUEpkeWw4NTJLQndJcDZyQzF4UW9LU3hoZE5wbXZwTkhXdzUwMElYaURaRGowCjIzcWtPRERjSUNHd3JXLzJENXpkT1FLQmdIdHFTclRIK2llN01hZnpXT0JvMlZkc004eFh1V0xlNndlVzJzZ3YKVjRkOGdzanlTRDk3N2pDcmlHK0ZSeDNSOTF1aDJMdGk3NjBzaFdlM2pXMThkR1FBZk9YcjBsRVBRLzNiSUp3egpGRFhTMldEUEtrNVNPd0ViYk5PSmhHeEk0K21HSmpscnB4N0hOLzNiSk9NUzJMdmFnT2M4azRFYnFvSjVqTWVnCmZiaXRBb0dCQUx2eUF5L0REZndoc0xqMnZZMlhDVEZPaVRwOURjdUlXL1p2NTRpQVJLTnllNExybHJmTEp1M04KZTVDRFlDeEdTaXBQQ1V3V3JOaFhBVGRPUUYwTGJyWEVnMElsa1p2bjVFVm1UZjVvMit5bUh2Q2I2NUY2VE1wdgp4SjFscFg5eVREVVh2ckpHcG44MzN2dE1SNkZBaEwvRlZrTjlDQS85K2tpMG9FRkxqR0t5Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="' ']' + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vQFr6T30xB ++ mktemp + local LAST_ERR=/tmp/tmp.nfpvcHYLEQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vQFr6T30xB + cat /tmp/tmp.nfpvcHYLEQ + rm /tmp/tmp.vQFr6T30xB /tmp/tmp.nfpvcHYLEQ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-ca-issuer OK' + set +o xtrace [2025-12-17T12:50:32+0000] compare_kubectl: issuer/some-name-psmdb-ca-issuer OK + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | ++ mktemp del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.aw2t0Tgf86 ++ mktemp + local LAST_ERR=/tmp/tmp.HIGXY6paRQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aw2t0Tgf86 + cat /tmp/tmp.HIGXY6paRQ + rm /tmp/tmp.aw2t0Tgf86 /tmp/tmp.HIGXY6paRQ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-issuer OK' + set +o xtrace [2025-12-17T12:50:33+0000] compare_kubectl: issuer/some-name-psmdb-issuer OK + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.6Kwiu350vR ++ mktemp + local LAST_ERR=/tmp/tmp.E0gXy0RTm1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6Kwiu350vR + cat /tmp/tmp.E0gXy0RTm1 + rm /tmp/tmp.6Kwiu350vR /tmp/tmp.E0gXy0RTm1 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + log 'compare_kubectl: certificate/some-name-ssl OK' + set +o xtrace [2025-12-17T12:50:34+0000] compare_kubectl: certificate/some-name-ssl OK + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local new_result=/tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.XQiU9E4gDZ ++ mktemp + local LAST_ERR=/tmp/tmp.YvGMAC8jGY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XQiU9E4gDZ + cat /tmp/tmp.YvGMAC8jGY + rm /tmp/tmp.XQiU9E4gDZ /tmp/tmp.YvGMAC8jGY + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + log 'compare_kubectl: certificate/some-name-ssl-internal OK' + set +o xtrace [2025-12-17T12:50:35+0000] compare_kubectl: certificate/some-name-ssl-internal OK + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait 
--for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9TDX5ncwB8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1j9PvZnflm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9TDX5ncwB8 ++ cat /tmp/tmp.1j9PvZnflm ++ rm /tmp/tmp.9TDX5ncwB8 /tmp/tmp.1j9PvZnflm ++ return 0 + pod_name=cmctl-7c7f6bf77d-mmb5v + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AlmKD7mq49 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KKnOjVDBa5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AlmKD7mq49 ++ cat /tmp/tmp.KKnOjVDBa5 ++ rm /tmp/tmp.AlmKD7mq49 /tmp/tmp.KKnOjVDBa5 ++ return 0 + revision=1 + kubectl_bin exec cmctl-7c7f6bf77d-mmb5v -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.UQ2sItorpm ++ mktemp + local LAST_ERR=/tmp/tmp.fWv7s7bDjZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-mmb5v -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UQ2sItorpm Manually triggered issuance of Certificate tls-issue-cert-manager-18440/some-name-ssl + cat /tmp/tmp.fWv7s7bDjZ + rm /tmp/tmp.UQ2sItorpm /tmp/tmp.fWv7s7bDjZ + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4p488J9gox +++ mktemp ++ local LAST_ERR=/tmp/tmp.OMwkYhLpEp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4p488J9gox ++ cat /tmp/tmp.OMwkYhLpEp ++ rm /tmp/tmp.4p488J9gox /tmp/tmp.OMwkYhLpEp ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in 
$(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UQKWhGrsIy +++ mktemp ++ local LAST_ERR=/tmp/tmp.NwYMggnUH8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UQKWhGrsIy ++ cat /tmp/tmp.NwYMggnUH8 ++ rm /tmp/tmp.UQKWhGrsIy /tmp/tmp.NwYMggnUH8 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8NhaKhrpGQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ml0Sjzj5ZY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8NhaKhrpGQ ++ cat /tmp/tmp.ml0Sjzj5ZY ++ rm /tmp/tmp.8NhaKhrpGQ /tmp/tmp.ml0Sjzj5ZY ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BnfxrTKTJB +++ mktemp ++ local LAST_ERR=/tmp/tmp.1CRoGlP6WA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BnfxrTKTJB ++ cat /tmp/tmp.1CRoGlP6WA ++ rm /tmp/tmp.BnfxrTKTJB /tmp/tmp.1CRoGlP6WA ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................................................................................................................................... 
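
Each "check if ... created/issued" block above is the same golden-file comparison: compare_kubectl dumps the live object as YAML, strips every field that varies from run to run, masks the generated namespace, and diffs the result against a file stored under e2e-tests/tls-issue-cert-manager/compare. A condensed sketch of that flow, keeping only a representative handful of the deletions from the full yq filter in the trace; the function name and argument order are illustrative, not the suite's actual helper:

# Minimal sketch of the compare_kubectl pattern seen above (assumed names,
# trimmed deletion list); requires kubectl, yq v4 and diff on the PATH.
compare_resource() {
    local resource="$1"   # e.g. issuer/some-name-psmdb-ca-issuer
    local namespace="$2"  # generated test namespace to mask
    local expected="$3"   # golden file checked into the repo
    local actual
    actual=$(mktemp)

    # Dump the live object and delete run-specific fields (uid, timestamps,
    # managed fields, status), then rewrite the namespace in every string
    # value so one golden file matches any generated namespace.
    kubectl get -o yaml "$resource" \
        | yq eval '
            del(.metadata.managedFields) |
            del(.metadata.resourceVersion) |
            del(.metadata.uid) |
            del(.metadata.creationTimestamp) |
            del(.status) |
            (.. | select(tag == "!!str")) |= sub("'"$namespace"'", "NAME_SPACE")
        ' - >"$actual"

    # An empty diff is the "compare_kubectl: <resource> OK" case in the log;
    # any difference is printed and should fail the test.
    diff -u "$expected" "$actual"
}

Invoked as compare_resource issuer/some-name-psmdb-ca-issuer tls-issue-cert-manager-18440 issuer_some-name-psmdb-ca-issuer.yml, an empty diff corresponds to the "compare_kubectl: ... OK" lines logged above.
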
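The renewal step that follows the comparisons is likewise a fixed pattern: record the certificate's .status.revision, trigger re-issuance through the cmctl pod, then poll until cert-manager bumps the revision (the revision=1 to new_revision=2 transition above). A standalone sketch of that pattern, assuming, as in this run, a cmctl binary at /tmp/cmctl inside a pod labeled name=cmctl; the function name and messages are illustrative:

# Minimal sketch of the renew_certificate pattern seen above.
renew_and_verify() {
    local certificate="$1"
    local pod revision new_revision

    pod=$(kubectl get pods --selector=name=cmctl \
        -o 'jsonpath={.items[].metadata.name}')
    revision=$(kubectl get certificate "$certificate" \
        -o 'jsonpath={.status.revision}')

    # cmctl renew marks the Certificate for manual re-issuance
    kubectl exec "$pod" -- /tmp/cmctl renew "$certificate"

    # cert-manager increments .status.revision once the new certificate
    # is issued, so poll until the revision has gone up by one
    for i in {1..10}; do
        new_revision=$(kubectl get certificate "$certificate" \
            -o 'jsonpath={.status.revision}')
        if [[ "$new_revision" == "$((revision + 1))" ]]; then
            echo "certificate $certificate renewed (revision $new_revision)"
            return 0
        fi
        sleep 1
    done
    echo "certificate $certificate was not renewed" >&2
    return 1
}
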
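After each renewal the harness waits for the rs0, cfg and mongos pods to come back, probing the arbiter/nonvoting/hidden flags on the replset to decide which extra pods to expect. A simplified sketch of that wait, substituting a single kubectl wait per pod for the harness's dot-printing poll loop; names and the timeout are illustrative:

# Simplified sketch of the wait_for_running pattern seen above.
wait_for_running_sketch() {
    local name="$1"   # statefulset prefix, e.g. some-name-rs0
    local size="$2"   # expected pod count, e.g. 3
    local cluster="${name%-*}"  # some-name-rs0 -> some-name
    local rs="${name##*-}"      # some-name-rs0 -> rs0

    for i in $(seq 0 $((size - 1))); do
        # the harness polls pod readiness itself and prints a dot per
        # attempt; kubectl wait is the one-call equivalent used for brevity
        kubectl wait --for=condition=Ready "pod/${name}-${i}" --timeout=300s
    done

    # the trace also checks whether extra pod types are enabled on the
    # replset before waiting for them; only the probe is shown here
    kubectl get psmdb "$cluster" \
        -o "jsonpath={.spec.replsets[?(@.name==\"$rs\")].arbiter.enabled}"
}
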
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vXdeYsKsbO +++ mktemp ++ local LAST_ERR=/tmp/tmp.6VXi5squBo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vXdeYsKsbO ++ cat /tmp/tmp.6VXi5squBo ++ rm /tmp/tmp.vXdeYsKsbO /tmp/tmp.6VXi5squBo ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TY4MQqbMFg +++ mktemp ++ local LAST_ERR=/tmp/tmp.mooAeJx2YA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TY4MQqbMFg ++ cat /tmp/tmp.mooAeJx2YA ++ rm /tmp/tmp.TY4MQqbMFg /tmp/tmp.mooAeJx2YA ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qrbSL3NEEr +++ mktemp ++ local LAST_ERR=/tmp/tmp.c3LjCV5vIF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qrbSL3NEEr ++ cat /tmp/tmp.c3LjCV5vIF ++ rm /tmp/tmp.qrbSL3NEEr /tmp/tmp.c3LjCV5vIF ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.71zgtI7lAs +++ mktemp ++ local LAST_ERR=/tmp/tmp.XVTwl23PZk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
-n 1 ']' ++ break ++ cat /tmp/tmp.71zgtI7lAs ++ cat /tmp/tmp.XVTwl23PZk ++ rm /tmp/tmp.71zgtI7lAs /tmp/tmp.XVTwl23PZk ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JAxCHcWQow +++ mktemp ++ local LAST_ERR=/tmp/tmp.jBgtjG4L0v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JAxCHcWQow ++ cat /tmp/tmp.jBgtjG4L0v ++ rm /tmp/tmp.JAxCHcWQow /tmp/tmp.jBgtjG4L0v ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D80KXW9BHj +++ mktemp ++ local LAST_ERR=/tmp/tmp.LeWUB3NTnu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D80KXW9BHj ++ cat /tmp/tmp.LeWUB3NTnu ++ rm /tmp/tmp.D80KXW9BHj /tmp/tmp.LeWUB3NTnu ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace 
----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fFSvSbynjo +++ mktemp ++ local LAST_ERR=/tmp/tmp.3t7Rew3Et1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fFSvSbynjo ++ cat /tmp/tmp.3t7Rew3Et1 ++ rm /tmp/tmp.fFSvSbynjo /tmp/tmp.3t7Rew3Et1 ++ return 0 + pod_name=cmctl-7c7f6bf77d-mmb5v + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QI12egSUIh +++ mktemp ++ local LAST_ERR=/tmp/tmp.N7l5P5Mnc4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QI12egSUIh ++ cat /tmp/tmp.N7l5P5Mnc4 ++ rm /tmp/tmp.QI12egSUIh /tmp/tmp.N7l5P5Mnc4 ++ return 0 + revision=1 + kubectl_bin exec cmctl-7c7f6bf77d-mmb5v -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.AB18BORKTI ++ mktemp + local LAST_ERR=/tmp/tmp.jQbLWZEGEc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-mmb5v -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AB18BORKTI Manually triggered issuance of Certificate tls-issue-cert-manager-18440/some-name-ssl-internal + cat /tmp/tmp.jQbLWZEGEc + rm /tmp/tmp.AB18BORKTI /tmp/tmp.jQbLWZEGEc + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BaaqDEVeCP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ob3rtVUbdD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BaaqDEVeCP ++ cat /tmp/tmp.Ob3rtVUbdD ++ rm /tmp/tmp.BaaqDEVeCP /tmp/tmp.Ob3rtVUbdD ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dAKmT6IfTn +++ mktemp ++ local LAST_ERR=/tmp/tmp.s9H4NhOkiQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dAKmT6IfTn ++ cat /tmp/tmp.s9H4NhOkiQ ++ rm /tmp/tmp.dAKmT6IfTn /tmp/tmp.s9H4NhOkiQ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zXc9FAst1w +++ mktemp ++ local LAST_ERR=/tmp/tmp.QrL3yc9eA1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zXc9FAst1w ++ cat /tmp/tmp.QrL3yc9eA1 ++ rm /tmp/tmp.zXc9FAst1w /tmp/tmp.QrL3yc9eA1 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8n6H1iwDiE +++ mktemp ++ local LAST_ERR=/tmp/tmp.l0s77DTFEo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8n6H1iwDiE ++ cat /tmp/tmp.l0s77DTFEo ++ rm /tmp/tmp.8n6H1iwDiE /tmp/tmp.l0s77DTFEo ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................................................................................................................ + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j9EUp3WpGC +++ mktemp ++ local LAST_ERR=/tmp/tmp.sqzD6XGS25 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j9EUp3WpGC ++ cat /tmp/tmp.sqzD6XGS25 ++ rm /tmp/tmp.j9EUp3WpGC /tmp/tmp.sqzD6XGS25 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xfI6eMkcdt +++ mktemp ++ local LAST_ERR=/tmp/tmp.73QVUb8nrW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xfI6eMkcdt ++ cat /tmp/tmp.73QVUb8nrW ++ rm /tmp/tmp.xfI6eMkcdt /tmp/tmp.73QVUb8nrW ++ return 0 + 
[[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZOj2tWhpwH +++ mktemp ++ local LAST_ERR=/tmp/tmp.yiHkYHUZhm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZOj2tWhpwH ++ cat /tmp/tmp.yiHkYHUZhm ++ rm /tmp/tmp.ZOj2tWhpwH /tmp/tmp.yiHkYHUZhm ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LB5ROwDQpR +++ mktemp ++ local LAST_ERR=/tmp/tmp.EpohrGfotP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LB5ROwDQpR ++ cat /tmp/tmp.EpohrGfotP ++ rm /tmp/tmp.LB5ROwDQpR /tmp/tmp.EpohrGfotP ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L42UTvHPjz +++ mktemp ++ local LAST_ERR=/tmp/tmp.kAK1ggMSUl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L42UTvHPjz ++ cat /tmp/tmp.kAK1ggMSUl ++ rm /tmp/tmp.L42UTvHPjz /tmp/tmp.kAK1ggMSUl ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HxAAtbkMzT +++ mktemp ++ local LAST_ERR=/tmp/tmp.2JZ5XYczb3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HxAAtbkMzT ++ cat /tmp/tmp.2JZ5XYczb3 ++ rm /tmp/tmp.HxAAtbkMzT /tmp/tmp.2JZ5XYczb3 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local 
resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.uU0T3CF92o ++ mktemp + local LAST_ERR=/tmp/tmp.bSng2LKV38 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uU0T3CF92o + cat /tmp/tmp.bSng2LKV38 + rm /tmp/tmp.uU0T3CF92o /tmp/tmp.bSng2LKV38 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-ca-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-ca-issuer OK' + set +o xtrace [2025-12-17T13:03:14+0000] compare_kubectl: issuer/some-name-psmdb-ca-issuer OK + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KAi7e97x5e ++ mktemp + local LAST_ERR=/tmp/tmp.qF0d6DHtZU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KAi7e97x5e + cat /tmp/tmp.qF0d6DHtZU + rm /tmp/tmp.KAi7e97x5e /tmp/tmp.qF0d6DHtZU + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.pN37kVL2rr/issuer_some-name-psmdb-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-issuer OK' + set +o xtrace [2025-12-17T13:03:14+0000] compare_kubectl: issuer/some-name-psmdb-issuer OK + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. 
| select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.9GPiqz7Sj3 ++ mktemp + local LAST_ERR=/tmp/tmp.HwGR9CiU5A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9GPiqz7Sj3 + cat /tmp/tmp.HwGR9CiU5A + rm /tmp/tmp.9GPiqz7Sj3 /tmp/tmp.HwGR9CiU5A + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl.yml + log 'compare_kubectl: certificate/some-name-ssl OK' + set +o xtrace [2025-12-17T13:03:15+0000] compare_kubectl: certificate/some-name-ssl OK + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local 
new_result=/tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1pIG4eyApJ ++ mktemp + local LAST_ERR=/tmp/tmp.9AlGsFKmbe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1pIG4eyApJ + cat /tmp/tmp.9AlGsFKmbe + rm /tmp/tmp.1pIG4eyApJ /tmp/tmp.9AlGsFKmbe + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.pN37kVL2rr/certificate_some-name-ssl-internal.yml + log 'compare_kubectl: certificate/some-name-ssl-internal OK' + set +o xtrace [2025-12-17T13:03:17+0000] compare_kubectl: certificate/some-name-ssl-internal OK + desc 'disable TLS' + set +o xtrace ----------------------------------------------------------------------------------- disable TLS ----------------------------------------------------------------------------------- + pause_cluster some-name + local cluster_name=some-name + echo 'Pausing cluster some-name' Pausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' ++ mktemp + local LAST_OUT=/tmp/tmp.jdpNb3p1gU ++ mktemp + local LAST_ERR=/tmp/tmp.PRSfxjXhnS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jdpNb3p1gU perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.PRSfxjXhnS + rm /tmp/tmp.jdpNb3p1gU /tmp/tmp.PRSfxjXhnS + return 0 + wait_for_cluster_state some-name paused + local cluster_name=some-name + local target_state=paused + echo -n 'Waiting for psmdb/some-name to reach paused state' Waiting for psmdb/some-name to reach paused state+ local timeout=0 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E0ztd2i03S +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Y7sUXmu6L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E0ztd2i03S ++ cat /tmp/tmp.3Y7sUXmu6L ++ rm /tmp/tmp.E0ztd2i03S /tmp/tmp.3Y7sUXmu6L ++ return 0 + [[ ready =~ paused ]] + sleep 1 + timeout=1 + echo -n . 
+ desc 'disable TLS' + set +o xtrace ----------------------------------------------------------------------------------- disable TLS ----------------------------------------------------------------------------------- + pause_cluster some-name + local cluster_name=some-name + echo 'Pausing cluster some-name' Pausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' ++ mktemp + local LAST_OUT=/tmp/tmp.jdpNb3p1gU ++ mktemp + local LAST_ERR=/tmp/tmp.PRSfxjXhnS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jdpNb3p1gU perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.PRSfxjXhnS + rm /tmp/tmp.jdpNb3p1gU /tmp/tmp.PRSfxjXhnS + return 0 + wait_for_cluster_state some-name paused + local cluster_name=some-name + local target_state=paused + echo -n 'Waiting for psmdb/some-name to reach paused state' Waiting for psmdb/some-name to reach paused state+ local timeout=0 [… 58 one-second polls of kubectl get psmdb some-name -o 'jsonpath={.status.state}' elided: the state read "ready" on the first two checks, "stopping" from timeout=2, briefly "error" at timeout=43-44, then "stopping" again until "paused" appeared at timeout=57 …] + [[ paused =~ paused ]] + echo + log 'psmdb/some-name is paused: OK' + set +o xtrace [2025-12-17T13:05:14+0000] psmdb/some-name is paused: OK
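
The roughly two minutes of polling collapsed above is wait_for_cluster_state, which simply re-reads .status.state once per second until it matches the target. Roughly, per the trace — the behaviour past 1500 iterations is an assumption, since this run never gets close to the limit:

  wait_for_cluster_state() {
      local cluster_name=$1
      local target_state=$2
      local timeout=0 state
      echo -n "Waiting for psmdb/${cluster_name} to reach ${target_state} state"
      while true; do
          # same jsonpath query the trace runs on every iteration
          state=$(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}')
          [[ $state =~ $target_state ]] && break
          sleep 1
          timeout=$((timeout + 1))
          echo -n .
          if [[ $timeout -gt 1500 ]]; then
              echo "psmdb/${cluster_name} never reached ${target_state}" >&2
              return 1
          fi
      done
      echo
      echo "psmdb/${cluster_name} is ${target_state}: OK"
  }

Note the transient "error" state at timeout=43-44 above: the loop tolerates it and keeps polling until the target state is reported.
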
+ disable_tls some-name + local cluster_name=some-name + echo 'Disabling TLS for cluster some-name' Disabling TLS for cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' ++ mktemp + local LAST_OUT=/tmp/tmp.OgyQK0MZe7 ++ mktemp + local LAST_ERR=/tmp/tmp.v4hZ3tf5JV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OgyQK0MZe7 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.v4hZ3tf5JV + rm /tmp/tmp.OgyQK0MZe7 /tmp/tmp.v4hZ3tf5JV + return 0 + unpause_cluster some-name + local cluster_name=some-name + echo 'Unpausing cluster some-name' Unpausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' ++ mktemp + local LAST_OUT=/tmp/tmp.n4Msfi31M9 ++ mktemp + local LAST_ERR=/tmp/tmp.zKJrvvvqJt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n4Msfi31M9 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.zKJrvvvqJt + rm /tmp/tmp.n4Msfi31M9 /tmp/tmp.zKJrvvvqJt + return 0
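
Stripped of the retry scaffolding, this whole 'disable TLS' step is three merge patches against the psmdb custom resource. Disabling TLS counts as an unsafe configuration, which is why the patch sets spec.unsafeFlags.tls together with spec.tls.mode, exactly as the trace shows:

  # pause the cluster and wait for .status.state to report "paused"
  kubectl patch psmdb some-name --type merge -p '{"spec": {"pause": true}}'
  # with the cluster paused, disable TLS (needs the matching unsafe flag)
  kubectl patch psmdb some-name --type merge \
      -p '{"spec": {"unsafeFlags": {"tls": true}, "tls": {"mode": "disabled"}}}'
  # resume the cluster and wait for .status.state to report "ready"
  kubectl patch psmdb some-name --type merge -p '{"spec": {"pause": false}}'
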
+ wait_for_cluster_state some-name ready + local cluster_name=some-name + local target_state=ready + echo -n 'Waiting for psmdb/some-name to reach ready state' Waiting for psmdb/some-name to reach ready state+ local timeout=0 [… one-second polls of kubectl get psmdb some-name -o 'jsonpath={.status.state}' elided: the state read "paused" on the first four checks and "initializing" from timeout=4 onward …] + [[ initializing =~ ready ]] + sleep 1 + timeout=48 + echo -n .
.+ [[ 48 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XSZjWM31X0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DXKfD4dzcd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XSZjWM31X0 ++ cat /tmp/tmp.DXKfD4dzcd ++ rm /tmp/tmp.XSZjWM31X0 /tmp/tmp.DXKfD4dzcd ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=49 + echo -n . .+ [[ 49 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BAL2nDKIvg +++ mktemp ++ local LAST_ERR=/tmp/tmp.YN8s26UYbd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BAL2nDKIvg ++ cat /tmp/tmp.YN8s26UYbd ++ rm /tmp/tmp.BAL2nDKIvg /tmp/tmp.YN8s26UYbd ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=50 + echo -n . .+ [[ 50 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sVNDGi003m +++ mktemp ++ local LAST_ERR=/tmp/tmp.y3whcy9M3T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sVNDGi003m ++ cat /tmp/tmp.y3whcy9M3T ++ rm /tmp/tmp.sVNDGi003m /tmp/tmp.y3whcy9M3T ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=51 + echo -n . .+ [[ 51 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.odjoU3sofh +++ mktemp ++ local LAST_ERR=/tmp/tmp.xvxAcNMzpF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.odjoU3sofh ++ cat /tmp/tmp.xvxAcNMzpF ++ rm /tmp/tmp.odjoU3sofh /tmp/tmp.xvxAcNMzpF ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=52 + echo -n . 
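For readers skimming the trace: the block above is a plain poll-until-ready loop. Once a second it reads .status.state from the psmdb custom resource and stops when the state matches "ready", giving up after 1500 attempts. A minimal standalone sketch of the same idea, assuming a kubectl context pointing at the test namespace (the helper name wait_for_psmdb_state and the error handling are illustrative; the 1500-attempt ceiling and one-second sleep match the trace):

wait_for_psmdb_state() {
  # Poll .status.state of a psmdb resource until it matches the wanted
  # state (default "ready"), bailing out after 1500 attempts as above.
  local cluster=$1 want=${2:-ready} timeout=0 state
  while true; do
    state=$(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}')
    [[ $state =~ $want ]] && break
    if [[ $timeout -gt 1500 ]]; then
      echo "psmdb/$cluster never reached '$want' (last state: $state)" >&2
      return 1
    fi
    sleep 1
    timeout=$((timeout + 1))
    echo -n .
  done
  echo "psmdb/$cluster is $want"
}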
.+ [[ 52 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sfEe7jhwvm +++ mktemp ++ local LAST_ERR=/tmp/tmp.IbhMjpsClJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sfEe7jhwvm ++ cat /tmp/tmp.IbhMjpsClJ ++ rm /tmp/tmp.sfEe7jhwvm /tmp/tmp.IbhMjpsClJ ++ return 0 + [[ ready =~ ready ]] + echo + log 'psmdb/some-name is ready: OK' + set +o xtrace [2025-12-17T13:07:07+0000] psmdb/some-name is ready: OK + compare_kubectl statefulset/some-name-rs0 -tls-disabled skip_generation_check + local resource=statefulset/some-name-rs0 + local postfix=-tls-disabled + local skip_generation_check=skip_generation_check + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml + local new_result=/tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.2pq56MlQsv ++ mktemp + local LAST_ERR=/tmp/tmp.tNBTKCodew + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2pq56MlQsv + cat /tmp/tmp.tNBTKCodew + rm /tmp/tmp.2pq56MlQsv /tmp/tmp.tNBTKCodew + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml == */cronjob* ]] + '[' -n skip_generation_check ']' + yq -i eval 'del(.metadata.generation)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml /tmp/tmp.pN37kVL2rr/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-17T13:07:07+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg -tls-disabled skip_generation_check + local resource=statefulset/some-name-cfg + local postfix=-tls-disabled + local skip_generation_check=skip_generation_check + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml + local new_result=/tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ypiDpKBYVH ++ mktemp + local LAST_ERR=/tmp/tmp.SW0VtmCx6b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ypiDpKBYVH + cat /tmp/tmp.SW0VtmCx6b + rm /tmp/tmp.ypiDpKBYVH /tmp/tmp.SW0VtmCx6b + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml == */cronjob* ]] + '[' -n skip_generation_check ']' + yq -i eval 'del(.metadata.generation)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml /tmp/tmp.pN37kVL2rr/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-17T13:07:08+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos -tls-disabled skip_generation_check + local resource=statefulset/some-name-mongos + local postfix=-tls-disabled + local skip_generation_check=skip_generation_check + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml + local new_result=/tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-18440", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.H2yRISboS4 ++ mktemp + local LAST_ERR=/tmp/tmp.T2jvN4MgTX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H2yRISboS4 + cat /tmp/tmp.T2jvN4MgTX + rm /tmp/tmp.H2yRISboS4 /tmp/tmp.T2jvN4MgTX + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml == */cronjob* ]] + '[' -n skip_generation_check ']' + yq -i eval 'del(.metadata.generation)' /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml /tmp/tmp.pN37kVL2rr/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-12-17T13:07:10+0000] compare_kubectl: statefulset/some-name-mongos OK + destroy tls-issue-cert-manager-18440 + local namespace=tls-issue-cert-manager-18440 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace 
+ destroy tls-issue-cert-manager-18440 + local namespace=tls-issue-cert-manager-18440 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.qFx0pzwUIP +++ mktemp ++ local LAST_ERR=/tmp/tmp.MkN53OTzrZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qFx0pzwUIP ++ cat /tmp/tmp.MkN53OTzrZ No resources found in tls-issue-cert-manager-18440 namespace. ++ rm /tmp/tmp.qFx0pzwUIP /tmp/tmp.MkN53OTzrZ ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.daZZWgzXy2 ++ mktemp + local LAST_ERR=/tmp/tmp.mCwRrfJLaN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.daZZWgzXy2 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.mCwRrfJLaN + rm /tmp/tmp.daZZWgzXy2 /tmp/tmp.mCwRrfJLaN + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.jIke639FN3 ++ mktemp + local LAST_ERR=/tmp/tmp.lX2cXwA3At + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jIke639FN3 + cat /tmp/tmp.lX2cXwA3At + rm /tmp/tmp.jIke639FN3 /tmp/tmp.lX2cXwA3At + return 0
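The per-CRD teardown above has three steps: list any remaining custom resources across all namespaces, patch their metadata.finalizers to an empty list so deletion cannot hang on a dead operator, then block on kubectl wait --for=delete for the CRD itself. The "the server doesn't have a resource type" errors are expected once the CRD is already gone, which is why each patch is followed by ':'. A sketch of the same steps (clear_crd_finalizers is a hypothetical name; note that grep -v -- '---' avoids the "stray \ before -" warnings that the trace's '\-\-\-' pattern triggers in newer GNU grep):

clear_crd_finalizers() {
  # Strip finalizers from every instance of a CRD, then wait for the
  # CRD itself to disappear; tolerate the type already being gone.
  local crd=$1
  kubectl get "$crd" --all-namespaces -o wide |
    grep -v NAMESPACE |
    xargs -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :
  kubectl wait --for=delete crd "$crd"
}

for crd in $(yq eval '.metadata.name' deploy/crd.yaml | grep -v -- '---'); do
  clear_crd_finalizers "$crd"
done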
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.eQ5duWDI6o ++ mktemp + local LAST_ERR=/tmp/tmp.v1aGIC7sTw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eQ5duWDI6o + cat /tmp/tmp.v1aGIC7sTw + rm /tmp/tmp.eQ5duWDI6o /tmp/tmp.v1aGIC7sTw + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.FIVCjq2AwQ ++ mktemp + local LAST_ERR=/tmp/tmp.nfxWncQkYW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FIVCjq2AwQ + cat /tmp/tmp.nfxWncQkYW + rm /tmp/tmp.FIVCjq2AwQ /tmp/tmp.nfxWncQkYW + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.u9TwunPyF6 ++ mktemp + local LAST_ERR=/tmp/tmp.4qqa6WhsQH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2148/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.u9TwunPyF6 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.4qqa6WhsQH + rm /tmp/tmp.u9TwunPyF6 /tmp/tmp.4qqa6WhsQH + return 0
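Every kubectl invocation in this log runs through the kubectl_bin wrapper, whose trace repeats around each call: capture stdout and stderr into mktemp files, try up to three times (seq 0 2), print both captures after each attempt, and return the final exit status. A simplified sketch of that wrapper (variable names follow the trace; the sleep arithmetic is an assumption inferred from the 0-, 4-, and 8-second sleeps visible in the cert-manager teardown below):

kubectl_bin() {
  # Retry wrapper around kubectl, mirroring the pattern traced in this log.
  local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
  LAST_OUT=$(mktemp)
  LAST_ERR=$(mktemp)
  for i in $(seq 0 2); do
    set +e
    kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
    exit_status=$?
    set -e
    if [ "$exit_status" -eq 0 ]; then
      break
    fi
    sleep $((timeout * i))   # 0s, then 4s, then 8s between attempts
  done
  cat "$LAST_OUT"
  cat "$LAST_ERR" >&2
  rm "$LAST_OUT" "$LAST_ERR"
  return "$exit_status"
}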
"clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.vO2RKMWGBE Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io 
"cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.petdT7pmdS + cat /tmp/tmp.vO2RKMWGBE Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.petdT7pmdS + cat /tmp/tmp.vO2RKMWGBE Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.petdT7pmdS + cat /tmp/tmp.vO2RKMWGBE Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.petdT7pmdS /tmp/tmp.vO2RKMWGBE + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace tls-issue-cert-manager-18440 + rm -rf /tmp/tmp.pN37kVL2rr + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.mU1wWZuMD5 + local LAST_OUT=/tmp/tmp.NNPnevVKX9 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.xv3qisv1ao + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.pY5y1yjpji + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace tls-issue-cert-manager-18440 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator