Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/logs/tls-issue-cert-manager.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra tls-issue-cert-manager-14931 + local ns=tls-issue-cert-manager-14931 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.t6cPTw4BNj ++ mktemp + local LAST_ERR=/tmp/tmp.Xx6USiz2tj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t6cPTw4BNj customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.Xx6USiz2tj + rm /tmp/tmp.t6cPTw4BNj /tmp/tmp.Xx6USiz2tj + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3assoILLAG ++ mktemp + local LAST_ERR=/tmp/tmp.pAByj7YsiF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3assoILLAG + cat /tmp/tmp.pAByj7YsiF + rm /tmp/tmp.3assoILLAG /tmp/tmp.pAByj7YsiF + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge 
-p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.N5VdDhaCBU ++ mktemp + local LAST_ERR=/tmp/tmp.JHeYSLmBBJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N5VdDhaCBU + cat /tmp/tmp.JHeYSLmBBJ + rm /tmp/tmp.N5VdDhaCBU /tmp/tmp.JHeYSLmBBJ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.DQEosGwDxX ++ mktemp + local LAST_ERR=/tmp/tmp.FPu28PPTyP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DQEosGwDxX + cat /tmp/tmp.FPu28PPTyP + rm /tmp/tmp.DQEosGwDxX /tmp/tmp.FPu28PPTyP + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.H5D9JFrYTS ++ mktemp + local LAST_ERR=/tmp/tmp.nsdIBJCOMZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H5D9JFrYTS clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.nsdIBJCOMZ + rm /tmp/tmp.H5D9JFrYTS /tmp/tmp.nsdIBJCOMZ + return 0 + check_crd_for_deletion PR-2100-0caebf48 + local git_tag=PR-2100-0caebf48 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2100-0caebf48/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5We27xYvYY +++ mktemp ++ local LAST_ERR=/tmp/tmp.nre8jJ94LE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.5We27xYvYY ++ cat /tmp/tmp.nre8jJ94LE Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ 
sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.5We27xYvYY ++ cat /tmp/tmp.nre8jJ94LE Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.5We27xYvYY ++ cat /tmp/tmp.nre8jJ94LE Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.5We27xYvYY ++ cat /tmp/tmp.nre8jJ94LE Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.5We27xYvYY /tmp/tmp.nre8jJ94LE ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.PEWkcqf5Wv ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + awk '{print$1}' + local LAST_ERR=/tmp/tmp.WCJA702ADo + local exit_status=0 + local timeout=4 ++ 
seq 0 2 + xargs kubectl delete ns + for i in $(seq 0 2) + set +e + kubectl get ns ++ mktemp + local LAST_OUT=/tmp/tmp.QRQytgrw3d ++ mktemp + local LAST_ERR=/tmp/tmp.WW0iD1vRoC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PEWkcqf5Wv + cat /tmp/tmp.WCJA702ADo + rm /tmp/tmp.PEWkcqf5Wv /tmp/tmp.WCJA702ADo + return 0 namespace "cert-manager" deleted namespace "tls-issue-cert-manager-25188" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QRQytgrw3d namespace "psmdb-operator" deleted + cat /tmp/tmp.WW0iD1vRoC + rm /tmp/tmp.QRQytgrw3d /tmp/tmp.WW0iD1vRoC + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.pgVsX4Mwn8 ++ mktemp + local LAST_ERR=/tmp/tmp.JCohimpZrW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pgVsX4Mwn8 + cat /tmp/tmp.JCohimpZrW + rm /tmp/tmp.pgVsX4Mwn8 /tmp/tmp.JCohimpZrW + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2RAOawlhPi ++ mktemp + local LAST_ERR=/tmp/tmp.bq0WTwwhLE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2RAOawlhPi namespace/psmdb-operator created + cat /tmp/tmp.bq0WTwwhLE + rm /tmp/tmp.2RAOawlhPi /tmp/tmp.bq0WTwwhLE + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.N6FB65pAW7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kDqSBtdczv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N6FB65pAW7 ++ cat /tmp/tmp.kDqSBtdczv ++ rm /tmp/tmp.N6FB65pAW7 /tmp/tmp.kDqSBtdczv ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2100-0caebf48-7-cluster4 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mSUaD25UKx ++ mktemp + local LAST_ERR=/tmp/tmp.bfOylnpZrR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2100-0caebf48-7-cluster4 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mSUaD25UKx Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2100-0caebf48-7-cluster4" modified. 
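[note] The LAST_OUT/LAST_ERR/mktemp bookkeeping that repeats throughout this trace comes from the harness's kubectl_bin retry wrapper. A minimal sketch reconstructed from the xtrace output above (the real helper in the harness's shared functions file may differ in detail; the 0s/4s/8s back-off matches the "sleep 0 ... sleep 4 ... sleep 8" sequence seen earlier):

    # Sketch of the retry wrapper visible throughout this trace
    # (reconstruction, not the verbatim helper).
    kubectl_bin() {
        local LAST_OUT LAST_ERR
        local exit_status=0
        local timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break                    # success: stop retrying
            fi
            sleep $((timeout * i))       # backs off 0s, 4s, 8s per attempt
        done
        cat "$LAST_OUT"                  # replay stdout for the log
        cat "$LAST_ERR" >&2              # replay stderr for the log
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }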
+ cat /tmp/tmp.bfOylnpZrR + rm /tmp/tmp.mSUaD25UKx /tmp/tmp.bfOylnpZrR + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2100-0caebf48' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2100-0caebf48 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.fkFcOsi9eV ++ mktemp + local LAST_ERR=/tmp/tmp.UZ5lu1T8PF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fkFcOsi9eV customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.UZ5lu1T8PF + rm /tmp/tmp.fkFcOsi9eV /tmp/tmp.UZ5lu1T8PF + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9CraziW5Ut ++ mktemp + local LAST_ERR=/tmp/tmp.ed4nlWKJnk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9CraziW5Ut clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.ed4nlWKJnk + rm /tmp/tmp.9CraziW5Ut /tmp/tmp.ed4nlWKJnk + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2100-0caebf48") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.mSSSEzX4wS ++ mktemp + local LAST_ERR=/tmp/tmp.Jb7pOZonxx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mSSSEzX4wS deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.Jb7pOZonxx + rm /tmp/tmp.mSSSEzX4wS /tmp/tmp.Jb7pOZonxx + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.xCxXRDbem5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qs0VOlWEoA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xCxXRDbem5 ++ cat /tmp/tmp.Qs0VOlWEoA ++ rm /tmp/tmp.xCxXRDbem5 /tmp/tmp.Qs0VOlWEoA ++ return 0 + wait_operator_pod percona-server-mongodb-operator-7dfb6b5b69-56dcd + local pod=percona-server-mongodb-operator-7dfb6b5b69-56dcd + set +o xtrace waiting for pod/percona-server-mongodb-operator-7dfb6b5b69-56dcd to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.zw2ktaanA0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.GBVjsoZmA6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zw2ktaanA0 ++ cat /tmp/tmp.GBVjsoZmA6 ++ rm /tmp/tmp.zw2ktaanA0 /tmp/tmp.GBVjsoZmA6 ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-7dfb6b5b69-56dcd ++ mktemp + local LAST_OUT=/tmp/tmp.geZLiqc6LG ++ mktemp + local LAST_ERR=/tmp/tmp.m4mXnek9X0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-7dfb6b5b69-56dcd + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.geZLiqc6LG + cat /tmp/tmp.m4mXnek9X0 + rm /tmp/tmp.geZLiqc6LG /tmp/tmp.m4mXnek9X0 + return 0 2025-10-31T15:43:06.546Z INFO setup Manager starting up {"gitCommit": "0caebf480537dd5514561b573f3a8e7b861bbc90", "gitBranch": "PR-2100-0caebf48", "buildTime": "", "goVersion": "go1.25.3", "os": "linux", "arch": "amd64"} + create_namespace tls-issue-cert-manager-14931 + local namespace=tls-issue-cert-manager-14931 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep 
chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces tls-issue-cert-manager-14931' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces tls-issue-cert-manager-14931 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace tls-issue-cert-manager-14931 --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.lTlAA0oQmf ++ mktemp + local LAST_OUT=/tmp/tmp.YeH7aRIcl1 ++ mktemp + local LAST_ERR=/tmp/tmp.X8si68ayG3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.ak2okX7YjR + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace tls-issue-cert-manager-14931 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lTlAA0oQmf + cat /tmp/tmp.X8si68ayG3 + rm /tmp/tmp.lTlAA0oQmf /tmp/tmp.X8si68ayG3 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YeH7aRIcl1 + cat /tmp/tmp.ak2okX7YjR + rm /tmp/tmp.YeH7aRIcl1 /tmp/tmp.ak2okX7YjR + return 0 + kubectl_bin wait --for=delete namespace tls-issue-cert-manager-14931 ++ mktemp + local LAST_OUT=/tmp/tmp.EtDmtiV0cB ++ mktemp + local LAST_ERR=/tmp/tmp.007hl6avr7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace tls-issue-cert-manager-14931 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EtDmtiV0cB + cat /tmp/tmp.007hl6avr7 + rm /tmp/tmp.EtDmtiV0cB /tmp/tmp.007hl6avr7 + return 0 + desc 'create namespace tls-issue-cert-manager-14931' + set +o xtrace ----------------------------------------------------------------------------------- create namespace tls-issue-cert-manager-14931 
----------------------------------------------------------------------------------- + kubectl_bin create namespace tls-issue-cert-manager-14931 ++ mktemp + local LAST_OUT=/tmp/tmp.5WzT2QYZWB ++ mktemp + local LAST_ERR=/tmp/tmp.2WZ411kXLo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace tls-issue-cert-manager-14931 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5WzT2QYZWB namespace/tls-issue-cert-manager-14931 created + cat /tmp/tmp.2WZ411kXLo + rm /tmp/tmp.5WzT2QYZWB /tmp/tmp.2WZ411kXLo + return 0 + set_kube_ctx tls-issue-cert-manager-14931 + local namespace=tls-issue-cert-manager-14931 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4cYRi8KPGi +++ mktemp ++ local LAST_ERR=/tmp/tmp.oog7IvcHPW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4cYRi8KPGi ++ cat /tmp/tmp.oog7IvcHPW ++ rm /tmp/tmp.4cYRi8KPGi /tmp/tmp.oog7IvcHPW ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2100-0caebf48-7-cluster4 --namespace=tls-issue-cert-manager-14931 ++ mktemp + local LAST_OUT=/tmp/tmp.gS2sdVwC5j ++ mktemp + local LAST_ERR=/tmp/tmp.bXRHKAZg4l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2100-0caebf48-7-cluster4 --namespace=tls-issue-cert-manager-14931 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gS2sdVwC5j Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2100-0caebf48-7-cluster4" modified. + cat /tmp/tmp.bXRHKAZg4l + rm /tmp/tmp.gS2sdVwC5j /tmp/tmp.bXRHKAZg4l + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.FlY2qLXOO1 ++ mktemp + local LAST_ERR=/tmp/tmp.GpX2speXAm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FlY2qLXOO1 namespace/cert-manager created + cat /tmp/tmp.GpX2speXAm + rm /tmp/tmp.FlY2qLXOO1 /tmp/tmp.GpX2speXAm + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.TqpLpAZp48 ++ mktemp + local LAST_ERR=/tmp/tmp.1jn64KYZWa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TqpLpAZp48 namespace/cert-manager labeled + cat /tmp/tmp.1jn64KYZWa + rm /tmp/tmp.TqpLpAZp48 /tmp/tmp.1jn64KYZWa + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.eeSc2sYD5L ++ mktemp + local LAST_ERR=/tmp/tmp.Jv2ynhTxHJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 
'!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eeSc2sYD5L namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook 
created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.Jv2ynhTxHJ Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.eeSc2sYD5L /tmp/tmp.Jv2ynhTxHJ + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.hhAYVkVGAM ++ mktemp + local LAST_ERR=/tmp/tmp.Pdyer813Zx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hhAYVkVGAM pod/cert-manager-cainjector-5dc9c8b4f7-fk7c5 condition met pod/cert-manager-df4b69479-g922l condition met pod/cert-manager-webhook-769bbb594d-6bt5s condition met + cat /tmp/tmp.Pdyer813Zx + rm /tmp/tmp.hhAYVkVGAM /tmp/tmp.Pdyer813Zx + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.eDxXrUfELa ++ mktemp + local LAST_ERR=/tmp/tmp.Itafd6aC1E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eDxXrUfELa secret/some-users created + cat /tmp/tmp.Itafd6aC1E + rm /tmp/tmp.eDxXrUfELa /tmp/tmp.Itafd6aC1E + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.rNrjR0ohkk ++ mktemp + local LAST_ERR=/tmp/tmp.f6XvJ1CAIL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rNrjR0ohkk deployment.apps/psmdb-client created + cat /tmp/tmp.f6XvJ1CAIL + rm /tmp/tmp.rNrjR0ohkk /tmp/tmp.f6XvJ1CAIL + return 0 + desc 'create custom cert-manager issuers and certificates' + set +o xtrace ----------------------------------------------------------------------------------- create custom cert-manager issuers and certificates ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.EQWQaj1rrs ++ mktemp + local LAST_ERR=/tmp/tmp.mztXT13R9o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 
']' + break + cat /tmp/tmp.EQWQaj1rrs issuer.cert-manager.io/some-name-psmdb-ca-issuer created + cat /tmp/tmp.mztXT13R9o + rm /tmp/tmp.EQWQaj1rrs /tmp/tmp.mztXT13R9o + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.TUK0EEYjJg ++ mktemp + local LAST_ERR=/tmp/tmp.c7X1D4G9HC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TUK0EEYjJg issuer.cert-manager.io/some-name-psmdb-issuer created + cat /tmp/tmp.c7X1D4G9HC + rm /tmp/tmp.TUK0EEYjJg /tmp/tmp.c7X1D4G9HC + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml ++ mktemp + local LAST_OUT=/tmp/tmp.IRprATahvw ++ mktemp + local LAST_ERR=/tmp/tmp.HmVlX2rtqp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IRprATahvw certificate.cert-manager.io/some-name-ca-cert created + cat /tmp/tmp.HmVlX2rtqp Warning: spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. + rm /tmp/tmp.IRprATahvw /tmp/tmp.HmVlX2rtqp + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml ++ mktemp + local LAST_OUT=/tmp/tmp.JAo4BJzgPV ++ mktemp + local LAST_ERR=/tmp/tmp.FtUbGlB2Ys + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JAo4BJzgPV certificate.cert-manager.io/some-name-ssl-internal created + cat /tmp/tmp.FtUbGlB2Ys Warning: spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. + rm /tmp/tmp.JAo4BJzgPV /tmp/tmp.FtUbGlB2Ys + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.zdpScy9NNW ++ mktemp + local LAST_ERR=/tmp/tmp.42OXUx4ILt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zdpScy9NNW certificate.cert-manager.io/some-name-ssl created + cat /tmp/tmp.42OXUx4ILt Warning: spec.privateKey.rotationPolicy: In cert-manager >= v1.18.0, the default value changed from `Never` to `Always`. 
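[note] The repeated rotationPolicy warning above is informational: since cert-manager v1.18 the default for spec.privateKey.rotationPolicy changed from Never to Always, so renewed certificates get fresh private keys. Declaring the field explicitly in the Certificate manifests would silence it; a minimal sketch with a hypothetical certificate name (only the issuerRef matches an object created in this trace):

    # Hypothetical example: pin rotationPolicy explicitly so the
    # cert-manager >= v1.18 default-change warning no longer fires.
    kubectl apply -f - <<'EOF'
    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: example-cert               # hypothetical name, not from this test
    spec:
      secretName: example-cert-secret  # hypothetical secret name
      commonName: example.local
      issuerRef:
        name: some-name-psmdb-issuer   # issuer created earlier in this trace
      privateKey:
        rotationPolicy: Always         # explicit value, matches the new default
    EOF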
+ rm /tmp/tmp.zdpScy9NNW /tmp/tmp.42OXUx4ILt + return 0 + deploy_cmctl + local service_account=cmctl + /usr/sbin/sed -e s/percona-server-mongodb-operator/cmctl/g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/rbac.yaml + yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.K3UmL6FP46 ++ mktemp + local LAST_ERR=/tmp/tmp.4JbyR5NvyD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K3UmL6FP46 role.rbac.authorization.k8s.io/cmctl created serviceaccount/cmctl created rolebinding.rbac.authorization.k8s.io/service-account-cmctl created + cat /tmp/tmp.4JbyR5NvyD + rm /tmp/tmp.K3UmL6FP46 /tmp/tmp.4JbyR5NvyD + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/conf/cmctl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.L2Yhd9bPCK ++ mktemp + local LAST_ERR=/tmp/tmp.GMpOMckrWc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/conf/cmctl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L2Yhd9bPCK deployment.apps/cmctl created + cat /tmp/tmp.GMpOMckrWc + rm /tmp/tmp.L2Yhd9bPCK /tmp/tmp.GMpOMckrWc + return 0 + sleep 60 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2100-0caebf48"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.5SGI84GMDt ++ mktemp + local LAST_ERR=/tmp/tmp.w32pcjYDdJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5SGI84GMDt perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.w32pcjYDdJ + rm /tmp/tmp.5SGI84GMDt /tmp/tmp.w32pcjYDdJ + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] 
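[note] The apply_cluster step above pipes the CR through a chain of yq substitutions before kubectl apply, injecting the images under test. Reconstructed as a sketch (yq expressions and image tags copied from the trace; the real cat_config helper may differ):

    # Sketch of cat_config as reconstructed from the trace: rewrite the
    # image fields in the CR, then hand the result to kubectl apply.
    cat_config() {
        cat "$1" \
            | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
            | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2100-0caebf48"' \
            | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
            | yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' \
            | yq eval '.spec.upgradeOptions.apply="Never"'
    }

    cat_config some-name.yml | kubectl apply -f -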
+ wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kGoHMyiVSb +++ mktemp ++ local LAST_ERR=/tmp/tmp.rR7k0AW9yW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kGoHMyiVSb ++ cat /tmp/tmp.rR7k0AW9yW ++ rm /tmp/tmp.kGoHMyiVSb /tmp/tmp.rR7k0AW9yW ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y0f0nqfdYN +++ mktemp ++ local LAST_ERR=/tmp/tmp.qvTcVs9JCA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y0f0nqfdYN ++ cat /tmp/tmp.qvTcVs9JCA ++ rm /tmp/tmp.Y0f0nqfdYN /tmp/tmp.qvTcVs9JCA ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T6dj3Cfjb4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hYTULSTyib ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T6dj3Cfjb4 ++ cat /tmp/tmp.hYTULSTyib ++ rm /tmp/tmp.T6dj3Cfjb4 /tmp/tmp.hYTULSTyib ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................. 
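[note] wait_for_running, as traced above, walks pods 0..2 of the replset, consulting the CR's arbiter/nonvoting/hidden flags to decide which pods to expect. The dotted "waiting for pod/... to be ready....OK" output is consistent with a poll loop like this minimal reconstruction (not the verbatim helper):

    # Minimal reconstruction of wait_pod: poll until the pod reports
    # Ready, printing one dot per attempt as in the trace output.
    wait_pod() {
        local pod=$1
        echo -n "waiting for pod/$pod to be ready"
        until kubectl wait --for=condition=Ready "pod/$pod" --timeout=2s >/dev/null 2>&1; do
            echo -n .
            sleep 1
        done
        echo OK
    }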
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SUjWcW5O6P +++ mktemp ++ local LAST_ERR=/tmp/tmp.taOPxRIgAJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SUjWcW5O6P ++ cat /tmp/tmp.taOPxRIgAJ ++ rm /tmp/tmp.SUjWcW5O6P /tmp/tmp.taOPxRIgAJ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9USTu6oRRj +++ mktemp ++ local LAST_ERR=/tmp/tmp.tq8tCDZMni ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9USTu6oRRj ++ cat /tmp/tmp.tq8tCDZMni ++ rm /tmp/tmp.9USTu6oRRj /tmp/tmp.tq8tCDZMni ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YGXCDeij3P +++ mktemp ++ local LAST_ERR=/tmp/tmp.irHCJtY8wp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YGXCDeij3P ++ cat /tmp/tmp.irHCJtY8wp ++ rm /tmp/tmp.YGXCDeij3P /tmp/tmp.irHCJtY8wp ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4Ryc1oAcg7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HDhxAlUnct ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
-n 1 ']' ++ break ++ cat /tmp/tmp.4Ryc1oAcg7 ++ cat /tmp/tmp.HDhxAlUnct ++ rm /tmp/tmp.4Ryc1oAcg7 /tmp/tmp.HDhxAlUnct ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.STaeVnTXvG +++ mktemp ++ local LAST_ERR=/tmp/tmp.ygzLFmj8y5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.STaeVnTXvG ++ cat /tmp/tmp.ygzLFmj8y5 ++ rm /tmp/tmp.STaeVnTXvG /tmp/tmp.ygzLFmj8y5 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZbAWstUQgf +++ mktemp ++ local LAST_ERR=/tmp/tmp.OgVYkhiRBJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZbAWstUQgf ++ cat /tmp/tmp.OgVYkhiRBJ ++ rm /tmp/tmp.ZbAWstUQgf /tmp/tmp.OgVYkhiRBJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'compare custom certificates and issuers' + set +o xtrace ----------------------------------------------------------------------------------- compare custom certificates and issuers ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl -custom + local resource=certificate/some-name-ssl + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml + local new_result=/tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.D4ZoRiyIgI ++ mktemp + local LAST_ERR=/tmp/tmp.6qDJuMQO18 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D4ZoRiyIgI + cat /tmp/tmp.6qDJuMQO18 + rm /tmp/tmp.D4ZoRiyIgI /tmp/tmp.6qDJuMQO18 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + log 'compare_kubectl: certificate/some-name-ssl OK' + set +o xtrace [2025-10-31T15:50:14+0000] compare_kubectl: certificate/some-name-ssl OK + compare_kubectl certificate/some-name-ssl-internal -custom + local resource=certificate/some-name-ssl-internal + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml + local new_result=/tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. 
| select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.nRME5ml64N ++ mktemp + local LAST_ERR=/tmp/tmp.VbweRWChc7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nRME5ml64N + cat /tmp/tmp.VbweRWChc7 + rm /tmp/tmp.nRME5ml64N /tmp/tmp.VbweRWChc7 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + log 'compare_kubectl: certificate/some-name-ssl-internal OK' + set +o xtrace [2025-10-31T15:50:15+0000] compare_kubectl: certificate/some-name-ssl-internal OK + compare_kubectl certificate/some-name-ca-cert -custom + local resource=certificate/some-name-ca-cert + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml + local new_result=/tmp/tmp.hnuVMpDSnH/certificate_some-name-ca-cert.yml + '[' -n '' -a -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ca-cert + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.kpx8wtTf9T ++ mktemp + local LAST_ERR=/tmp/tmp.D7mlD6TW8y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ca-cert + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kpx8wtTf9T + cat /tmp/tmp.D7mlD6TW8y + rm /tmp/tmp.kpx8wtTf9T /tmp/tmp.D7mlD6TW8y + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ca-cert.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ca-cert.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ca-cert.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml /tmp/tmp.hnuVMpDSnH/certificate_some-name-ca-cert.yml + log 'compare_kubectl: certificate/some-name-ca-cert OK' + set +o xtrace [2025-10-31T15:50:16+0000] compare_kubectl: certificate/some-name-ca-cert OK + compare_kubectl issuer/some-name-psmdb-ca-issuer -custom + local resource=issuer/some-name-psmdb-ca-issuer + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml + local new_result=/tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.3gRKiaZ6O1 ++ mktemp + local LAST_ERR=/tmp/tmp.922u9ZLH9b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3gRKiaZ6O1 + cat /tmp/tmp.922u9ZLH9b + rm /tmp/tmp.3gRKiaZ6O1 /tmp/tmp.922u9ZLH9b + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-ca-issuer OK' + set +o xtrace [2025-10-31T15:50:17+0000] compare_kubectl: issuer/some-name-psmdb-ca-issuer OK + compare_kubectl issuer/some-name-psmdb-issuer -custom + local resource=issuer/some-name-psmdb-issuer + local postfix=-custom + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml + local new_result=/tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.I7jXBiYXPP ++ mktemp + local LAST_ERR=/tmp/tmp.nZ5SSaT416 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I7jXBiYXPP + cat /tmp/tmp.nZ5SSaT416 + rm /tmp/tmp.I7jXBiYXPP /tmp/tmp.nZ5SSaT416 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-issuer OK' + set +o xtrace [2025-10-31T15:50:18+0000] compare_kubectl: issuer/some-name-psmdb-issuer OK + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl delete psmdb --all perconaservermongodb.psmdb.percona.com "some-name" deleted from tls-issue-cert-manager-14931 namespace + wait_for_delete psmdb/some-name 180 + local res=psmdb/some-name + local wait_time=180 + set +o xtrace waiting for psmdb/some-name to be deletedError from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "some-name" not found Error from server (NotFound): perconaservermongodbs.psmdb.percona.com 
"some-name" not found + kubectl delete pvc --all persistentvolumeclaim "mongod-data-some-name-cfg-0" deleted from tls-issue-cert-manager-14931 namespace persistentvolumeclaim "mongod-data-some-name-cfg-1" deleted from tls-issue-cert-manager-14931 namespace persistentvolumeclaim "mongod-data-some-name-cfg-2" deleted from tls-issue-cert-manager-14931 namespace persistentvolumeclaim "mongod-data-some-name-rs0-0" deleted from tls-issue-cert-manager-14931 namespace persistentvolumeclaim "mongod-data-some-name-rs0-1" deleted from tls-issue-cert-manager-14931 namespace persistentvolumeclaim "mongod-data-some-name-rs0-2" deleted from tls-issue-cert-manager-14931 namespace + desc 'delete custom cert-manager issuers and certificates' + set +o xtrace ----------------------------------------------------------------------------------- delete custom cert-manager issuers and certificates ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.d3VOOGKdoJ ++ mktemp + local LAST_ERR=/tmp/tmp.tCvLacV2Kn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d3VOOGKdoJ issuer.cert-manager.io "some-name-psmdb-ca-issuer" deleted from tls-issue-cert-manager-14931 namespace + cat /tmp/tmp.tCvLacV2Kn + rm /tmp/tmp.d3VOOGKdoJ /tmp/tmp.tCvLacV2Kn + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.xFCtEw3in6 ++ mktemp + local LAST_ERR=/tmp/tmp.xsO3beRQOs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xFCtEw3in6 issuer.cert-manager.io "some-name-psmdb-issuer" deleted from tls-issue-cert-manager-14931 namespace + cat /tmp/tmp.xsO3beRQOs + rm /tmp/tmp.xFCtEw3in6 /tmp/tmp.xsO3beRQOs + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml ++ mktemp + local LAST_OUT=/tmp/tmp.G8PrO5fJzA ++ mktemp + local LAST_ERR=/tmp/tmp.AvvKtvZP88 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G8PrO5fJzA certificate.cert-manager.io "some-name-ca-cert" deleted from tls-issue-cert-manager-14931 namespace + cat /tmp/tmp.AvvKtvZP88 + rm /tmp/tmp.G8PrO5fJzA /tmp/tmp.AvvKtvZP88 + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml ++ mktemp + local LAST_OUT=/tmp/tmp.1LQAjyv2QB ++ mktemp + local LAST_ERR=/tmp/tmp.0CVhaWBFRK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1LQAjyv2QB certificate.cert-manager.io "some-name-ssl-internal" deleted from tls-issue-cert-manager-14931 namespace + cat /tmp/tmp.0CVhaWBFRK + rm /tmp/tmp.1LQAjyv2QB /tmp/tmp.0CVhaWBFRK + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.YbkBpaLxya ++ mktemp + local LAST_ERR=/tmp/tmp.j4m3uOI1fH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YbkBpaLxya certificate.cert-manager.io "some-name-ssl" deleted from tls-issue-cert-manager-14931 namespace + cat /tmp/tmp.j4m3uOI1fH + rm /tmp/tmp.YbkBpaLxya /tmp/tmp.j4m3uOI1fH + return 0 + sleep 30 + desc 'delete ssl secrets, operator should recreate them' + set +o xtrace ----------------------------------------------------------------------------------- delete ssl secrets, operator should recreate them ----------------------------------------------------------------------------------- + kubectl_bin delete secret some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.DU5ld2zoZ4 ++ mktemp + local LAST_ERR=/tmp/tmp.3sPcaWHlCe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete secret some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DU5ld2zoZ4 secret "some-name-ssl-internal" deleted from tls-issue-cert-manager-14931 namespace + cat /tmp/tmp.3sPcaWHlCe + rm /tmp/tmp.DU5ld2zoZ4 /tmp/tmp.3sPcaWHlCe + return 0 + kubectl_bin delete secret some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.wCvDtbNv9Z ++ mktemp + local LAST_ERR=/tmp/tmp.9ucYdhZ9e2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete secret some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wCvDtbNv9Z secret "some-name-ssl" deleted from tls-issue-cert-manager-14931 namespace + cat /tmp/tmp.9ucYdhZ9e2 + rm /tmp/tmp.wCvDtbNv9Z /tmp/tmp.9ucYdhZ9e2 + return 0 + sleep 30 + desc 'recreate PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- recreate PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2100-0caebf48"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval 
'.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.lYsksduwZW ++ mktemp + local LAST_ERR=/tmp/tmp.sA71RC1zl0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lYsksduwZW perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.sA71RC1zl0 + rm /tmp/tmp.lYsksduwZW /tmp/tmp.sA71RC1zl0 + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fuC7V5WS2z +++ mktemp ++ local LAST_ERR=/tmp/tmp.9DdwvWgx6d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fuC7V5WS2z ++ cat /tmp/tmp.9DdwvWgx6d ++ rm /tmp/tmp.fuC7V5WS2z /tmp/tmp.9DdwvWgx6d ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Sh8DkYc7M2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.piqq3yUnhu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Sh8DkYc7M2 ++ cat /tmp/tmp.piqq3yUnhu ++ rm /tmp/tmp.Sh8DkYc7M2 /tmp/tmp.piqq3yUnhu ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F90HubnUST +++ mktemp ++ local LAST_ERR=/tmp/tmp.lFst8REbJ4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F90HubnUST ++ cat /tmp/tmp.lFst8REbJ4 ++ rm /tmp/tmp.F90HubnUST /tmp/tmp.lFst8REbJ4 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................... 
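
The rs0 rollout above condenses the two helper patterns this harness repeats for every step: a kubectl wrapper that captures stdout and stderr into mktemp files and retries the command up to three times, and a readiness poll that prints a dot per attempt until the pod reports Ready. The sketch below is a minimal reconstruction of those patterns, not the harness's actual code: the names retry_kubectl and wait_pod_ready, the fixed 4-second back-off, and the Ready-condition jsonpath are illustrative assumptions.

#!/bin/bash
set -euo pipefail

# Retry a kubectl command up to 3 times, buffering output the way the
# log's kubectl_bin wrapper does with its LAST_OUT/LAST_ERR temp files.
retry_kubectl() {
    local out err status=1
    out=$(mktemp); err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$out" 2>"$err"
        status=$?
        set -e
        [ "$status" -eq 0 ] && break
        sleep 4    # assumed back-off; the trace only shows timeout=4
    done
    cat "$out"; cat "$err" >&2
    rm -f "$out" "$err"
    return "$status"
}

# Poll until a pod's Ready condition is True, echoing dots like the
# harness's wait_pod ("waiting for pod/... to be ready....OK").
wait_pod_ready() {
    local pod=$1
    printf 'waiting for pod/%s to be ready' "$pod"
    until [ "$(kubectl get pod "$pod" \
        -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}')" = "True" ]; do
        printf .
        sleep 1
    done
    echo OK
}

# Example: retry_kubectl get psmdb some-name; wait_pod_ready some-name-cfg-0

The same caveat applies to the compare_kubectl steps that follow: each one pipes the live object through a single long yq program that deletes server-populated and cluster-specific fields, rewrites the ephemeral test namespace to a NAME_SPACE placeholder, and then runs diff -u against the checked-in expected file. Condensed into a hypothetical compare_normalized helper that keeps only a handful of the del() rules visible in the trace (a real run would need the full rule list):

# Normalize a live object and diff it against the expected manifest.
# Usage: compare_normalized issuer/some-name-psmdb-issuer expected.yml tls-issue-cert-manager-14931
compare_normalized() {
    local resource=$1 expected=$2 namespace=$3
    local actual
    actual=$(mktemp)
    kubectl get -o yaml "$resource" | yq eval '
        del(.metadata.managedFields) |
        del(.metadata.resourceVersion) |
        del(.. | select(has("uid")).uid) |
        del(.. | select(has("creationTimestamp")).creationTimestamp) |
        del(.status) |
        (.. | select(tag == "!!str")) |= sub("'"$namespace"'", "NAME_SPACE")
    ' - >"$actual"
    # A non-empty diff returns non-zero and fails the step, mirroring the harness.
    diff -u "$expected" "$actual"
    rm -f "$actual"
}
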
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WcylpKqKFZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.VoseE3tcYr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WcylpKqKFZ ++ cat /tmp/tmp.VoseE3tcYr ++ rm /tmp/tmp.WcylpKqKFZ /tmp/tmp.VoseE3tcYr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lNf5XWc1DS +++ mktemp ++ local LAST_ERR=/tmp/tmp.J8J0pTlqrH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lNf5XWc1DS ++ cat /tmp/tmp.J8J0pTlqrH ++ rm /tmp/tmp.lNf5XWc1DS /tmp/tmp.J8J0pTlqrH ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EyhFgJweRT +++ mktemp ++ local LAST_ERR=/tmp/tmp.VpFirkZbUh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EyhFgJweRT ++ cat /tmp/tmp.VpFirkZbUh ++ rm /tmp/tmp.EyhFgJweRT /tmp/tmp.VpFirkZbUh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UCWzTtZNug +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z2o99t0cVY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
-n 1 ']' ++ break ++ cat /tmp/tmp.UCWzTtZNug ++ cat /tmp/tmp.Z2o99t0cVY ++ rm /tmp/tmp.UCWzTtZNug /tmp/tmp.Z2o99t0cVY ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bgyruSsDtQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.VDWHjrGps6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bgyruSsDtQ ++ cat /tmp/tmp.VDWHjrGps6 ++ rm /tmp/tmp.bgyruSsDtQ /tmp/tmp.VDWHjrGps6 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AbrVdiM94C +++ mktemp ++ local LAST_ERR=/tmp/tmp.D9epICb1Nx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AbrVdiM94C ++ cat /tmp/tmp.D9epICb1Nx ++ rm /tmp/tmp.AbrVdiM94C /tmp/tmp.D9epICb1Nx ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.148TDTI23K ++ mktemp + local LAST_ERR=/tmp/tmp.4iLZxEM2JD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.148TDTI23K + cat /tmp/tmp.4iLZxEM2JD + rm /tmp/tmp.148TDTI23K /tmp/tmp.4iLZxEM2JD + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-10-31T15:54:42+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.a3I67hfVOi ++ mktemp + local LAST_ERR=/tmp/tmp.oqU2gmG1pP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a3I67hfVOi + cat /tmp/tmp.oqU2gmG1pP + rm /tmp/tmp.a3I67hfVOi /tmp/tmp.oqU2gmG1pP + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-10-31T15:54:43+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.HGoyz5MPzx ++ mktemp + local LAST_ERR=/tmp/tmp.h6W4kafvTG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HGoyz5MPzx + cat /tmp/tmp.h6W4kafvTG + rm /tmp/tmp.HGoyz5MPzx /tmp/tmp.h6W4kafvTG + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-10-31T15:54:44+0000] compare_kubectl: statefulset/some-name-mongos OK + desc 'check if certificates issued with certmanager' + set +o xtrace ----------------------------------------------------------------------------------- check if certificates issued with certmanager ----------------------------------------------------------------------------------- + check_tls_secret some-name-ssl + local secret_name=some-name-ssl + check_secret_data_key some-name-ssl ca.crt + local secret_name=some-name-ssl + local data_key=ca.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["ca.crt"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e2AAsEEBgS +++ mktemp ++ local LAST_ERR=/tmp/tmp.KeIDnIYwsd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e2AAsEEBgS ++ cat /tmp/tmp.KeIDnIYwsd ++ rm /tmp/tmp.e2AAsEEBgS /tmp/tmp.KeIDnIYwsd ++ return 0 + 
secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lVVDdnYS9yOENRRWhIRlovemxTZk4rOGtLbDljd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEF6TVRFMU5EWXhPRm9YRFRJMgpNVEF6TVRFMU5EWXhPRm93RnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNSUlCSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF6bjhGNTN1QlBxakNpZzNocTQ3cFluR01uS1B4WmdsMFpsU1kKSnR5TG1sNXFTbm9yMzZGYmRJaFRqS1VaUXQ0M2RRcDFNYTltRVNKb2VVV1BZU3lSYUthdHhJaVYwOEpSYWczVwpoc3BiL28zNFdIcFNFV2J5WDBtZnFRbEc1MjVMQ2VLckZseW5HamJKaGp3RHBEY21ld1JLTTIxbGNBQXdrNm5WCk8xYVcwOWxVYW8yUlhXSmhPSE5LY1BiWDVJVUpSMFVuWDltdGNBd2dQOEJhTkxDQUpaTHlUMVFDQWdJdzg3SE0KYnJyaEZRdTJFQWVqenFMQk5tMGllV2VOLzB6QU5Od3k4U3RwZEs2ZzYxdk9KZVNkbGt4ZmhZdHNMZmVrdnQ1MwpGMk1PMVlKMjUvR2Vmb0lOeHlSUW5RSnpkMnB6dUtiVEpMeTZDcXJqeTJoUkE0SjFyd0lEQVFBQm8wSXdRREFPCkJnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVRHc5VVZ1YisKRVdpYllzWXZUR1c5L0g0Q3VHMHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQjRua1doWlltM09vaVdCdVFNKwp5NllFMW42dk8wZjJ5Z0dxbzRTaFJ0Q2UvN3FLTWpjMzVXVXFweU1vbVpTVVBKelMxc05rZG9SRGF3OGVyalIwClRwakJLeklWWm8xK1hJVXlnb3lmM1p5WjhTVzViR0JWVHhEeDZ6YndBTVFvZFp0cE1rN3FiUjFJWUVQSjdqL2cKbWExQ3dZckFBUDVCY0dWYmQxOFp1VHNnZE45U1NhRXp0UXJPUURPZTdXdkUyTkVUMk9xa1BKQURaMHBlMEtITApwSTFzVm9KRXlyWTRhVkRuR2NtWUVPVGJIV3RHdU12T0pLdGRtMXpod0RNMzFEaFd6Zmk5VllWQ1A2MTBXWmZ4CmFMRWxKamxFU1MxTEtiWk5ZZHlZWnhlaGxwcGdaQkFjcmViTGIybTREOEsrdW9LMlltSXBCL2JHdGxOZ1VJWlUKdWFzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lVVDdnYS9yOENRRWhIRlovemxTZk4rOGtLbDljd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEF6TVRFMU5EWXhPRm9YRFRJMgpNVEF6TVRFMU5EWXhPRm93RnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNSUlCSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF6bjhGNTN1QlBxakNpZzNocTQ3cFluR01uS1B4WmdsMFpsU1kKSnR5TG1sNXFTbm9yMzZGYmRJaFRqS1VaUXQ0M2RRcDFNYTltRVNKb2VVV1BZU3lSYUthdHhJaVYwOEpSYWczVwpoc3BiL28zNFdIcFNFV2J5WDBtZnFRbEc1MjVMQ2VLckZseW5HamJKaGp3RHBEY21ld1JLTTIxbGNBQXdrNm5WCk8xYVcwOWxVYW8yUlhXSmhPSE5LY1BiWDVJVUpSMFVuWDltdGNBd2dQOEJhTkxDQUpaTHlUMVFDQWdJdzg3SE0KYnJyaEZRdTJFQWVqenFMQk5tMGllV2VOLzB6QU5Od3k4U3RwZEs2ZzYxdk9KZVNkbGt4ZmhZdHNMZmVrdnQ1MwpGMk1PMVlKMjUvR2Vmb0lOeHlSUW5RSnpkMnB6dUtiVEpMeTZDcXJqeTJoUkE0SjFyd0lEQVFBQm8wSXdRREFPCkJnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVRHc5VVZ1YisKRVdpYllzWXZUR1c5L0g0Q3VHMHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQjRua1doWlltM09vaVdCdVFNKwp5NllFMW42dk8wZjJ5Z0dxbzRTaFJ0Q2UvN3FLTWpjMzVXVXFweU1vbVpTVVBKelMxc05rZG9SRGF3OGVyalIwClRwakJLeklWWm8xK1hJVXlnb3lmM1p5WjhTVzViR0JWVHhEeDZ6YndBTVFvZFp0cE1rN3FiUjFJWUVQSjdqL2cKbWExQ3dZckFBUDVCY0dWYmQxOFp1VHNnZE45U1NhRXp0UXJPUURPZTdXdkUyTkVUMk9xa1BKQURaMHBlMEtITApwSTFzVm9KRXlyWTRhVkRuR2NtWUVPVGJIV3RHdU12T0pLdGRtMXpod0RNMzFEaFd6Zmk5VllWQ1A2MTBXWmZ4CmFMRWxKamxFU1MxTEtiWk5ZZHlZWnhlaGxwcGdaQkFjcmViTGIybTREOEsrdW9LMlltSXBCL2JHdGxOZ1VJWlUKdWFzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="' ']' + check_secret_data_key some-name-ssl tls.crt + local secret_name=some-name-ssl + local data_key=tls.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["tls.crt"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hxtOeTlxmp +++ mktemp ++ local LAST_ERR=/tmp/tmp.j2texEQVi1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hxtOeTlxmp ++ cat /tmp/tmp.j2texEQVi1 ++ rm /tmp/tmp.hxtOeTlxmp 
/tmp/tmp.j2texEQVi1 ++ return 0 + secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg2ekNDQnRPZ0F3SUJBZ0lVZHExcTFTQU1JMFRaQitZaXZpM2ZURlA2VzhBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEF6TVRFMU5URTFNRm9YRFRJMgpNREV5T1RFMU5URTFNRm93SkRFT01Bd0dBMVVFQ2hNRlVGTk5SRUl4RWpBUUJnTlZCQU1UQ1hOdmJXVXRibUZ0ClpUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU5ZWkh6bHQ5N1hyYzhOc3Z6dWQKcmcrNnA4end6Z3gzZ2JFU3JabytnTkVaQ1l1c1U1eFNkUzFETUsxNXY3RG95aE5XTEVxNjJUU2FYeXQ2UWdaaQoyVmw5a25pK0lJV1ZuOHJjazYwblFqTXllT08rRnlBL1NPM1Q0dFdKK21Zc0tuSHVleXA3QlhCSTRjM0t5R2IzClhPZWFIL0llaVdBTVJzaW5PenNnUHZaVG1WaWpYWWI4REdJdnNNeXN2REQ0WGhicyt4MXB1T1dIUm5mSVFYenIKR1JFdk51R2F4cmgxc3JmbC9WRVNhZ1dodlliS3g0Wk9VaE5IZThFNzRLK3NrUTVoakFlWHBpeXdCd2laMlJBbwp3WlVVcExuOEdHb0NlYXJiRTBtTy9QREwvY244S3BpalRWNlVlYnpURkFReThwMzd6ZUVQS0Rhd2tXbytwZ0h4CnBuOENBd0VBQWFPQ0JTQXdnZ1VjTUE0R0ExVWREd0VCL3dRRUF3SUZvREFNQmdOVkhSTUJBZjhFQWpBQU1COEcKQTFVZEl3UVlNQmFBRkE4UFZGYm0vaEZvbTJMR0wweGx2ZngrQXJodE1JSUUyUVlEVlIwUkJJSUUwRENDQk15QwpDV3h2WTJGc2FHOXpkSUlOYzI5dFpTMXVZVzFsTFhKek1JSXFjMjl0WlMxdVlXMWxMWEp6TUM1MGJITXRhWE56CmRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFME9UTXhnanh6YjIxbExXNWhiV1V0Y25Nd0xuUnNjeTFwYzNOMVpTMWoKWlhKMExXMWhibUZuWlhJdE1UUTVNekV1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDRHlvdWMyOXRaUzF1WVcxbApMWEp6TUlJc0tpNXpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVFE1Ck16R0NQaW91YzI5dFpTMXVZVzFsTFhKek1DNTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFME9UTXgKTG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2o5emIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqWlhKMApMVzFoYm1GblpYSXRNVFE1TXpFdWMzWmpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsCkxYSnpNQzUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTBPVE14TG5OMll5NWpiSFZ6ZEdWeWMyVjAKTG14dlkyRnNnak1xTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVFE1TXpFdWMzWmpMbU5zZFhOMApaWEp6WlhRdWJHOWpZV3lDRUhOdmJXVXRibUZ0WlMxdGIyNW5iM09DTFhOdmJXVXRibUZ0WlMxdGIyNW5iM011CmRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TkRrek1ZSS9jMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTAKYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTBPVE14TG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2hJcQpMbk52YldVdGJtRnRaUzF0YjI1bmIzT0NMeW91YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEUwT1RNeGdrRXFMbk52YldVdGJtRnRaUzF0YjI1bmIzTXVkR3h6TFdsemMzVmwKTFdObGNuUXRiV0Z1WVdkbGNpMHhORGt6TVM1emRtTXVZMngxYzNSbGNpNXNiMk5oYklJTmMyOXRaUzF1WVcxbApMV05tWjRJcWMyOXRaUzF1WVcxbExXTm1aeTUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTBPVE14CmdqeHpiMjFsTFc1aGJXVXRZMlpuTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVFE1TXpFdWMzWmoKTG1Oc2RYTjBaWEl1Ykc5allXeUNEeW91YzI5dFpTMXVZVzFsTFdObVo0SXNLaTV6YjIxbExXNWhiV1V0WTJabgpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UUTVNekdDUGlvdWMyOXRaUzF1WVcxbExXTm1aeTUwCmJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEUwT1RNeExuTjJZeTVqYkhWemRHVnlMbXh2WTJGc2drSnoKYjIxbExXNWhiV1V0Ylc5dVoyOXpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UUTVNekV1YzNaagpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNSQ291YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEUwT1RNeExuTjJZeTVqYkhWemRHVnljMlYwTG14dlkyRnNnajl6YjIxbExXNWgKYldVdFkyWm5MblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UUTVNekV1YzNaakxtTnNkWE4wWlhKegpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsTFdObVp5NTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5CkxURTBPVE14TG5OMll5NWpiSFZ6ZEdWeWMyVjBMbXh2WTJGc01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQUoKK2E4QnBHaTZtQ0ZCbFpNOEs4dlhsYVN0TGdEREtYdlpIdGdoemRJU3huaWYyRk5QcDBWWkNHb2h5RGJGNmZwZgpBdEI1dk1iYmxWaXF2RDlBTnVzMG5udFdWZzlndCtFWW1EYUM1QjdIdmhtQXFqdjIyVEM5RHllK1dCcXdEQ2dnClM5cktFVVRwcDBUUlJvNi9qMnloSUthRDZZTmlIMUY4cDQ2aG9pUmdReXJxY2lRNkZrWm5OKzJSR3dkQ2N2Z3YKNm8
1enhweEY4R2NXSS9ncmlHQjNDQTI2ZjRlR1d3eDVmQ1BBUWl3TXpncmlCU0Fjb2JuWmxYNTdFM2ZwMVQ4Vgp4RUJ3OTN6bk5iZTJJRnJDR3BiL3ZjVEZoTmxmaVNObXpsdXVlVUJTWUZFMXAxSk5Xc01McFF3d0dQbmdPLzU3CmpELzI1UDl5WUpMZHZuU0owakZwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg2ekNDQnRPZ0F3SUJBZ0lVZHExcTFTQU1JMFRaQitZaXZpM2ZURlA2VzhBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF4TU1jMjl0WlMxdVlXMWxMV05oTUI0WERUSTFNVEF6TVRFMU5URTFNRm9YRFRJMgpNREV5T1RFMU5URTFNRm93SkRFT01Bd0dBMVVFQ2hNRlVGTk5SRUl4RWpBUUJnTlZCQU1UQ1hOdmJXVXRibUZ0ClpUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU5ZWkh6bHQ5N1hyYzhOc3Z6dWQKcmcrNnA4end6Z3gzZ2JFU3JabytnTkVaQ1l1c1U1eFNkUzFETUsxNXY3RG95aE5XTEVxNjJUU2FYeXQ2UWdaaQoyVmw5a25pK0lJV1ZuOHJjazYwblFqTXllT08rRnlBL1NPM1Q0dFdKK21Zc0tuSHVleXA3QlhCSTRjM0t5R2IzClhPZWFIL0llaVdBTVJzaW5PenNnUHZaVG1WaWpYWWI4REdJdnNNeXN2REQ0WGhicyt4MXB1T1dIUm5mSVFYenIKR1JFdk51R2F4cmgxc3JmbC9WRVNhZ1dodlliS3g0Wk9VaE5IZThFNzRLK3NrUTVoakFlWHBpeXdCd2laMlJBbwp3WlVVcExuOEdHb0NlYXJiRTBtTy9QREwvY244S3BpalRWNlVlYnpURkFReThwMzd6ZUVQS0Rhd2tXbytwZ0h4CnBuOENBd0VBQWFPQ0JTQXdnZ1VjTUE0R0ExVWREd0VCL3dRRUF3SUZvREFNQmdOVkhSTUJBZjhFQWpBQU1COEcKQTFVZEl3UVlNQmFBRkE4UFZGYm0vaEZvbTJMR0wweGx2ZngrQXJodE1JSUUyUVlEVlIwUkJJSUUwRENDQk15QwpDV3h2WTJGc2FHOXpkSUlOYzI5dFpTMXVZVzFsTFhKek1JSXFjMjl0WlMxdVlXMWxMWEp6TUM1MGJITXRhWE56CmRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFME9UTXhnanh6YjIxbExXNWhiV1V0Y25Nd0xuUnNjeTFwYzNOMVpTMWoKWlhKMExXMWhibUZuWlhJdE1UUTVNekV1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDRHlvdWMyOXRaUzF1WVcxbApMWEp6TUlJc0tpNXpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVFE1Ck16R0NQaW91YzI5dFpTMXVZVzFsTFhKek1DNTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRFME9UTXgKTG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2o5emIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqWlhKMApMVzFoYm1GblpYSXRNVFE1TXpFdWMzWmpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsCkxYSnpNQzUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTBPVE14TG5OMll5NWpiSFZ6ZEdWeWMyVjAKTG14dlkyRnNnak1xTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVFE1TXpFdWMzWmpMbU5zZFhOMApaWEp6WlhRdWJHOWpZV3lDRUhOdmJXVXRibUZ0WlMxdGIyNW5iM09DTFhOdmJXVXRibUZ0WlMxdGIyNW5iM011CmRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TkRrek1ZSS9jMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTAKYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTBPVE14TG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2hJcQpMbk52YldVdGJtRnRaUzF0YjI1bmIzT0NMeW91YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEUwT1RNeGdrRXFMbk52YldVdGJtRnRaUzF0YjI1bmIzTXVkR3h6TFdsemMzVmwKTFdObGNuUXRiV0Z1WVdkbGNpMHhORGt6TVM1emRtTXVZMngxYzNSbGNpNXNiMk5oYklJTmMyOXRaUzF1WVcxbApMV05tWjRJcWMyOXRaUzF1WVcxbExXTm1aeTUwYkhNdGFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURTBPVE14CmdqeHpiMjFsTFc1aGJXVXRZMlpuTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVFE1TXpFdWMzWmoKTG1Oc2RYTjBaWEl1Ykc5allXeUNEeW91YzI5dFpTMXVZVzFsTFdObVo0SXNLaTV6YjIxbExXNWhiV1V0WTJabgpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UUTVNekdDUGlvdWMyOXRaUzF1WVcxbExXTm1aeTUwCmJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEUwT1RNeExuTjJZeTVqYkhWemRHVnlMbXh2WTJGc2drSnoKYjIxbExXNWhiV1V0Ylc5dVoyOXpMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UUTVNekV1YzNaagpMbU5zZFhOMFpYSnpaWFF1Ykc5allXeUNSQ291YzI5dFpTMXVZVzFsTFcxdmJtZHZjeTUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEUwT1RNeExuTjJZeTVqYkhWemRHVnljMlYwTG14dlkyRnNnajl6YjIxbExXNWgKYldVdFkyWm5MblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1UUTVNekV1YzNaakxtTnNkWE4wWlhKegpaWFF1Ykc5allXeUNRU291YzI5dFpTMXVZVzFsTFdObVp5NTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5CkxURTBPVE14TG5OMll5NWpiSFZ6ZEdWeWMyVjBMbXh2WTJGc01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQUoKK2E4QnBHaTZtQ0ZCbFpNOEs4dlhsYVN0TGdEREtYdlpIdGdoemRJU3huaWYyRk5Q
cDBWWkNHb2h5RGJGNmZwZgpBdEI1dk1iYmxWaXF2RDlBTnVzMG5udFdWZzlndCtFWW1EYUM1QjdIdmhtQXFqdjIyVEM5RHllK1dCcXdEQ2dnClM5cktFVVRwcDBUUlJvNi9qMnloSUthRDZZTmlIMUY4cDQ2aG9pUmdReXJxY2lRNkZrWm5OKzJSR3dkQ2N2Z3YKNm81enhweEY4R2NXSS9ncmlHQjNDQTI2ZjRlR1d3eDVmQ1BBUWl3TXpncmlCU0Fjb2JuWmxYNTdFM2ZwMVQ4Vgp4RUJ3OTN6bk5iZTJJRnJDR3BiL3ZjVEZoTmxmaVNObXpsdXVlVUJTWUZFMXAxSk5Xc01McFF3d0dQbmdPLzU3CmpELzI1UDl5WUpMZHZuU0owakZwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"' ']' + check_secret_data_key some-name-ssl tls.key + local secret_name=some-name-ssl + local data_key=tls.key + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["tls.key"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O3RLUd1FKa +++ mktemp ++ local LAST_ERR=/tmp/tmp.kwYY3l5AtS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O3RLUd1FKa ++ cat /tmp/tmp.kwYY3l5AtS ++ rm /tmp/tmp.O3RLUd1FKa /tmp/tmp.kwYY3l5AtS ++ return 0 + secret_data='"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMWhrZk9XMzN0ZXR6dzJ5L081MnVEN3FuelBET0RIZUJzUkt0bWo2QTBSa0ppNnhUCm5GSjFMVU13clhtL3NPaktFMVlzU3JyWk5KcGZLM3BDQm1MWldYMlNlTDRnaFpXZnl0eVRyU2RDTXpKNDQ3NFgKSUQ5STdkUGkxWW42Wml3cWNlNTdLbnNGY0VqaHpjckladmRjNTVvZjhoNkpZQXhHeUtjN095QSs5bE9aV0tOZApodndNWWkrd3pLeThNUGhlRnV6N0hXbTQ1WWRHZDhoQmZPc1pFUzgyNFpyR3VIV3l0K1g5VVJKcUJhRzloc3JICmhrNVNFMGQ3d1R2Z3I2eVJEbUdNQjVlbUxMQUhDSm5aRUNqQmxSU2t1ZndZYWdKNXF0c1RTWTc4OE12OXlmd3EKbUtOTlhwUjV2Tk1VQkRMeW5mdk40UThvTnJDUmFqNm1BZkdtZndJREFRQUJBb0lCQUF1NHJHRDBpRTlKYUxrVwpSeEg0ZkFNWWNQOWpYNlNIRm02Zld5bktxanJJYUNGaGVlSmhFVkU5RUhrdTVUUEdTdW5zR2RpdGloNjdKT0s0CmVzem10SWVWdkdNbjVaZnFyU2pxRXB1MFBvZVhCODlRSk92N0N2OGFUcHpxUk84N0U4ZlJDcVUwdndONnVnSU8KVEJKM0VIc05rUm9zV2RFTVd3TzVSQ0RranczbFJDVzhOcVk3NlBpNzJjcTFSUkp3bG43WVRhdk9EV0tQQlpZegp6OFFpeWtMWC9UN2l6MVg0dElhdy9vS2xTeDFweUFKQXdIclF3eklFSjRtM3ZnTlUwcFRJSUk0U2g5SVZ1bXJaCi85bTJLWUJMQWdrNUpWbVdGRElmUjVLazZsNnRwWVcrYVpnRVNmLzJCRmxqeFExbEMyOHVBcldDN1RCdjVJVFcKMENQRmRZRUNnWUVBNGxWS1RENjM4aGo5RGJZenFoY0IrRktZKzdmUmM4NTdIeTNzZGdNRFFPSGlCY3FBNmVPcwpOeVovY1NFMjA1Qjc0Q2VVTm1VMEpuaE94emxHRFgxazdjWkd0NEVjQlkvNmRqNTJRbmZLa2cwTXZxWkNtU3paCkxCMmU2czJ4TEMrUWUyR3hCdDBDOVNVZC94WHZubHBLanBTTG9FdGtvd3lTeVFFeHMwWmpIMDhDZ1lFQThpbEgKb1lTV1duUTczL3JiUXpYUUZuZWtjVU1VNXlhM0dGeFBQVnBVYU1PTCtQS1EvK0VPWHNrNnk5c2I5aWhRQUhraApRSzIydFluazV1bWQ4VHFqcnlkZ1h4SWJVRjg5RjcyTlJ2MktNVnFkMmNuZGp3YXhCakE3d0hab3V4ZjkyWDhoCk85RjNySzhMYnY0MkROY2kxbmJ0VlFEYnMzR3NhWnRUOFI4THVkRUNnWUVBelc2MjRsK25ISko3MWQ5RUk2V0EKZjBHUlpDQ2pkMmE2TUVIQnVCZmg4eGxUeXdmVVhQYnUwSndiM3JaVUNsSEJibUxlVDBVcXdhUS9kT3JTWDlvcQpLdnZtSWhlSk5LOUxtZG4yQXp5eDNFRUdDV0hsWHh6cS9mVGcwcVRpcElXRHNjbXBJZDZuR1pXWUZYbDJwaGUwClJwZ2o1OW1tdmF2aTY1a3U0RmNLRDFjQ2dZRUE0aWgzUTN0bVIvZ0I2ejZIQkEvLzFOdkw4aE5xTkM3T0ZLd3gKUE9WZHE3SGt5WTBRcWtodmVVcUo1TlV2R2dld0g2RUEwenpwUkVZU1J5MnJXaDBmdlhjUzQvY3ZGeks3QmE3NwpnRzlOWHFaNGdXdXJ4ZVhEOGptZHRETmh3RW01eENTY3p4YmpvNXlmR3Z2RFV0NmhKSlpSTkcvbXY0SE1PWnpkCkduV2FKK0VDZ1lBenFna3Rkdk8xeXR1ZjhENFZERlYvNEJ6WW1qVjFxMWExNTQ1L05EVGIvTnhTcGQyZGQzcGoKWlFBcEtnSExuMDFwT2ZPdURsclh5b2VLckdGSzlRZG1HZTg0NzZjcUhROE1FQS8vMFZ2UVZ4YW9BdDZPVHZMZQpIK1VYYnlDcjZ1UTY2TkdhQ01MV2hHZ05KU0ZuNlV5dkdBRnBLbmRMdHY1cEhnUWNHT2Q4TlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo="' + '[' -z 
'"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMWhrZk9XMzN0ZXR6dzJ5L081MnVEN3FuelBET0RIZUJzUkt0bWo2QTBSa0ppNnhUCm5GSjFMVU13clhtL3NPaktFMVlzU3JyWk5KcGZLM3BDQm1MWldYMlNlTDRnaFpXZnl0eVRyU2RDTXpKNDQ3NFgKSUQ5STdkUGkxWW42Wml3cWNlNTdLbnNGY0VqaHpjckladmRjNTVvZjhoNkpZQXhHeUtjN095QSs5bE9aV0tOZApodndNWWkrd3pLeThNUGhlRnV6N0hXbTQ1WWRHZDhoQmZPc1pFUzgyNFpyR3VIV3l0K1g5VVJKcUJhRzloc3JICmhrNVNFMGQ3d1R2Z3I2eVJEbUdNQjVlbUxMQUhDSm5aRUNqQmxSU2t1ZndZYWdKNXF0c1RTWTc4OE12OXlmd3EKbUtOTlhwUjV2Tk1VQkRMeW5mdk40UThvTnJDUmFqNm1BZkdtZndJREFRQUJBb0lCQUF1NHJHRDBpRTlKYUxrVwpSeEg0ZkFNWWNQOWpYNlNIRm02Zld5bktxanJJYUNGaGVlSmhFVkU5RUhrdTVUUEdTdW5zR2RpdGloNjdKT0s0CmVzem10SWVWdkdNbjVaZnFyU2pxRXB1MFBvZVhCODlRSk92N0N2OGFUcHpxUk84N0U4ZlJDcVUwdndONnVnSU8KVEJKM0VIc05rUm9zV2RFTVd3TzVSQ0RranczbFJDVzhOcVk3NlBpNzJjcTFSUkp3bG43WVRhdk9EV0tQQlpZegp6OFFpeWtMWC9UN2l6MVg0dElhdy9vS2xTeDFweUFKQXdIclF3eklFSjRtM3ZnTlUwcFRJSUk0U2g5SVZ1bXJaCi85bTJLWUJMQWdrNUpWbVdGRElmUjVLazZsNnRwWVcrYVpnRVNmLzJCRmxqeFExbEMyOHVBcldDN1RCdjVJVFcKMENQRmRZRUNnWUVBNGxWS1RENjM4aGo5RGJZenFoY0IrRktZKzdmUmM4NTdIeTNzZGdNRFFPSGlCY3FBNmVPcwpOeVovY1NFMjA1Qjc0Q2VVTm1VMEpuaE94emxHRFgxazdjWkd0NEVjQlkvNmRqNTJRbmZLa2cwTXZxWkNtU3paCkxCMmU2czJ4TEMrUWUyR3hCdDBDOVNVZC94WHZubHBLanBTTG9FdGtvd3lTeVFFeHMwWmpIMDhDZ1lFQThpbEgKb1lTV1duUTczL3JiUXpYUUZuZWtjVU1VNXlhM0dGeFBQVnBVYU1PTCtQS1EvK0VPWHNrNnk5c2I5aWhRQUhraApRSzIydFluazV1bWQ4VHFqcnlkZ1h4SWJVRjg5RjcyTlJ2MktNVnFkMmNuZGp3YXhCakE3d0hab3V4ZjkyWDhoCk85RjNySzhMYnY0MkROY2kxbmJ0VlFEYnMzR3NhWnRUOFI4THVkRUNnWUVBelc2MjRsK25ISko3MWQ5RUk2V0EKZjBHUlpDQ2pkMmE2TUVIQnVCZmg4eGxUeXdmVVhQYnUwSndiM3JaVUNsSEJibUxlVDBVcXdhUS9kT3JTWDlvcQpLdnZtSWhlSk5LOUxtZG4yQXp5eDNFRUdDV0hsWHh6cS9mVGcwcVRpcElXRHNjbXBJZDZuR1pXWUZYbDJwaGUwClJwZ2o1OW1tdmF2aTY1a3U0RmNLRDFjQ2dZRUE0aWgzUTN0bVIvZ0I2ejZIQkEvLzFOdkw4aE5xTkM3T0ZLd3gKUE9WZHE3SGt5WTBRcWtodmVVcUo1TlV2R2dld0g2RUEwenpwUkVZU1J5MnJXaDBmdlhjUzQvY3ZGeks3QmE3NwpnRzlOWHFaNGdXdXJ4ZVhEOGptZHRETmh3RW01eENTY3p4YmpvNXlmR3Z2RFV0NmhKSlpSTkcvbXY0SE1PWnpkCkduV2FKK0VDZ1lBenFna3Rkdk8xeXR1ZjhENFZERlYvNEJ6WW1qVjFxMWExNTQ1L05EVGIvTnhTcGQyZGQzcGoKWlFBcEtnSExuMDFwT2ZPdURsclh5b2VLckdGSzlRZG1HZTg0NzZjcUhROE1FQS8vMFZ2UVZ4YW9BdDZPVHZMZQpIK1VYYnlDcjZ1UTY2TkdhQ01MV2hHZ05KU0ZuNlV5dkdBRnBLbmRMdHY1cEhnUWNHT2Q4TlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo="' ']' + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.0FECrfOzb7 ++ mktemp + local LAST_ERR=/tmp/tmp.reMZl95AFV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0FECrfOzb7 + cat /tmp/tmp.reMZl95AFV + rm /tmp/tmp.0FECrfOzb7 /tmp/tmp.reMZl95AFV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-ca-issuer OK' + set +o xtrace [2025-10-31T15:54:48+0000] compare_kubectl: issuer/some-name-psmdb-ca-issuer OK + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.bwNaktX3jO ++ mktemp + local LAST_ERR=/tmp/tmp.6lCI2O8RHZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bwNaktX3jO + cat /tmp/tmp.6lCI2O8RHZ + rm /tmp/tmp.bwNaktX3jO /tmp/tmp.6lCI2O8RHZ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-issuer OK' + set +o xtrace [2025-10-31T15:54:49+0000] compare_kubectl: issuer/some-name-psmdb-issuer OK + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. 
| select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.s5ZzHloS2C ++ mktemp + local LAST_ERR=/tmp/tmp.KgY4SNI4wS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s5ZzHloS2C + cat /tmp/tmp.KgY4SNI4wS + rm /tmp/tmp.s5ZzHloS2C /tmp/tmp.KgY4SNI4wS + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + log 'compare_kubectl: certificate/some-name-ssl OK' + set +o xtrace [2025-10-31T15:54:51+0000] compare_kubectl: certificate/some-name-ssl OK + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local 
new_result=/tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.WjfhKgHAxW ++ mktemp + local LAST_ERR=/tmp/tmp.nsG64ATHjW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WjfhKgHAxW + cat /tmp/tmp.nsG64ATHjW + rm /tmp/tmp.WjfhKgHAxW /tmp/tmp.nsG64ATHjW + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + log 'compare_kubectl: certificate/some-name-ssl-internal OK' + set +o xtrace [2025-10-31T15:54:51+0000] compare_kubectl: certificate/some-name-ssl-internal OK + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VrCf57sKph +++ mktemp ++ 
local LAST_ERR=/tmp/tmp.g6cWaGLIbO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VrCf57sKph ++ cat /tmp/tmp.g6cWaGLIbO ++ rm /tmp/tmp.VrCf57sKph /tmp/tmp.g6cWaGLIbO ++ return 0 + pod_name=cmctl-7c7f6bf77d-mlklh + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oEVB3XxcOP +++ mktemp ++ local LAST_ERR=/tmp/tmp.YNDiECPGaN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oEVB3XxcOP ++ cat /tmp/tmp.YNDiECPGaN ++ rm /tmp/tmp.oEVB3XxcOP /tmp/tmp.YNDiECPGaN ++ return 0 + revision=1 + kubectl_bin exec cmctl-7c7f6bf77d-mlklh -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.C1INgzHXOs ++ mktemp + local LAST_ERR=/tmp/tmp.Lno42ocMmL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-mlklh -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C1INgzHXOs Manually triggered issuance of Certificate tls-issue-cert-manager-14931/some-name-ssl + cat /tmp/tmp.Lno42ocMmL + rm /tmp/tmp.C1INgzHXOs /tmp/tmp.Lno42ocMmL + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ePykqvwlzq +++ mktemp ++ local LAST_ERR=/tmp/tmp.ouFaWOehcW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ePykqvwlzq ++ cat /tmp/tmp.ouFaWOehcW ++ rm /tmp/tmp.ePykqvwlzq /tmp/tmp.ouFaWOehcW ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IGsc4bwDBc +++ mktemp ++ local LAST_ERR=/tmp/tmp.QM0rtrihcg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IGsc4bwDBc ++ cat /tmp/tmp.QM0rtrihcg ++ rm /tmp/tmp.IGsc4bwDBc /tmp/tmp.QM0rtrihcg ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.Epx4vM2Uc8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ASh9K11XF9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Epx4vM2Uc8 ++ cat /tmp/tmp.ASh9K11XF9 ++ rm /tmp/tmp.Epx4vM2Uc8 /tmp/tmp.ASh9K11XF9 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VsIICslFOx +++ mktemp ++ local LAST_ERR=/tmp/tmp.c9lXqlhLaR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VsIICslFOx ++ cat /tmp/tmp.c9lXqlhLaR ++ rm /tmp/tmp.VsIICslFOx /tmp/tmp.c9lXqlhLaR ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................................................................................................................................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4lUvJYIskZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.pLC6aroEww ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4lUvJYIskZ ++ cat /tmp/tmp.pLC6aroEww ++ rm /tmp/tmp.4lUvJYIskZ /tmp/tmp.pLC6aroEww ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T38sWIDzwv +++ mktemp ++ local LAST_ERR=/tmp/tmp.u1mdIIEfZI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T38sWIDzwv ++ cat /tmp/tmp.u1mdIIEfZI ++ rm /tmp/tmp.T38sWIDzwv /tmp/tmp.u1mdIIEfZI ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w8mCjB3qFA +++ mktemp ++ local LAST_ERR=/tmp/tmp.dj0YnTmw2K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w8mCjB3qFA ++ cat /tmp/tmp.dj0YnTmw2K ++ rm /tmp/tmp.w8mCjB3qFA /tmp/tmp.dj0YnTmw2K ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nnqwXOSDVB +++ mktemp ++ local LAST_ERR=/tmp/tmp.xUUflc11Yp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nnqwXOSDVB ++ cat /tmp/tmp.xUUflc11Yp ++ rm /tmp/tmp.nnqwXOSDVB /tmp/tmp.xUUflc11Yp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cRzBDUJqFe +++ mktemp ++ local LAST_ERR=/tmp/tmp.9nUmiERw72 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cRzBDUJqFe ++ cat /tmp/tmp.9nUmiERw72 ++ rm /tmp/tmp.cRzBDUJqFe /tmp/tmp.9nUmiERw72 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KeDAgRfMhH +++ mktemp ++ local LAST_ERR=/tmp/tmp.j87R3U8DIT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KeDAgRfMhH ++ cat /tmp/tmp.j87R3U8DIT ++ rm /tmp/tmp.KeDAgRfMhH /tmp/tmp.j87R3U8DIT ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready 
certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4IQS18NXus +++ mktemp ++ local LAST_ERR=/tmp/tmp.WjF2iPY997 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4IQS18NXus ++ cat /tmp/tmp.WjF2iPY997 ++ rm /tmp/tmp.4IQS18NXus /tmp/tmp.WjF2iPY997 ++ return 0 + pod_name=cmctl-7c7f6bf77d-mlklh + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yUysk6R4Mb +++ mktemp ++ local LAST_ERR=/tmp/tmp.6yAiCDKGIi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yUysk6R4Mb ++ cat /tmp/tmp.6yAiCDKGIi ++ rm /tmp/tmp.yUysk6R4Mb /tmp/tmp.6yAiCDKGIi ++ return 0 + revision=1 + kubectl_bin exec cmctl-7c7f6bf77d-mlklh -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.aMp2JuWS9H ++ mktemp + local LAST_ERR=/tmp/tmp.zVDGEhzHPb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-mlklh -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aMp2JuWS9H Manually triggered issuance of Certificate tls-issue-cert-manager-14931/some-name-ssl-internal + cat /tmp/tmp.zVDGEhzHPb + rm /tmp/tmp.aMp2JuWS9H /tmp/tmp.zVDGEhzHPb + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EdHydEdV52 +++ mktemp ++ local LAST_ERR=/tmp/tmp.65Rd2UEzGH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate 
some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EdHydEdV52 ++ cat /tmp/tmp.65Rd2UEzGH ++ rm /tmp/tmp.EdHydEdV52 /tmp/tmp.65Rd2UEzGH ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hl4d2jzwCb +++ mktemp ++ local LAST_ERR=/tmp/tmp.PNegkrJiiM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hl4d2jzwCb ++ cat /tmp/tmp.PNegkrJiiM ++ rm /tmp/tmp.hl4d2jzwCb /tmp/tmp.PNegkrJiiM ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5S6KiDEMRl +++ mktemp ++ local LAST_ERR=/tmp/tmp.T45Tx2acmo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5S6KiDEMRl ++ cat /tmp/tmp.T45Tx2acmo ++ rm /tmp/tmp.5S6KiDEMRl /tmp/tmp.T45Tx2acmo ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q0meKxwhwh +++ mktemp ++ local LAST_ERR=/tmp/tmp.XxZN8c9XDB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q0meKxwhwh ++ cat /tmp/tmp.XxZN8c9XDB ++ rm /tmp/tmp.q0meKxwhwh /tmp/tmp.XxZN8c9XDB ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness....................................................................................................................................................................................................... 
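Stepping back from the trace: the renew_certificate call that opened this block first runs wait_certificate, which demands ten consecutive successful kubectl wait calls (one per second) rather than a single one, so a certificate that is mid-reissue cannot slip through. It then records .status.revision, triggers a manual issuance through the cmctl binary shipped in the cmctl helper pod, and polls until cert-manager bumps the revision by one. A sketch reconstructed from the trace; failure handling is trimmed and the retry sleep in the final loop is an assumption:

    wait_certificate() {
        local certificate=$1
        # ten consecutive Ready checks, guarding against a flapping condition
        for i in {1..10}; do
            kubectl wait --for=condition=Ready "certificate/${certificate}" --timeout=60s
            sleep 1
        done
    }

    renew_certificate() {
        local certificate=$1
        wait_certificate "$certificate"

        local pod_name revision new_revision
        pod_name=$(kubectl_bin get pods --selector=name=cmctl \
            -o 'jsonpath={.items[].metadata.name}')
        revision=$(kubectl_bin get certificate "$certificate" \
            -o 'jsonpath={.status.revision}')

        # prints "Manually triggered issuance of Certificate <namespace>/<name>"
        kubectl_bin exec "$pod_name" -- /tmp/cmctl renew "$certificate"

        # a successful re-issuance increments .status.revision
        for i in {1..10}; do
            new_revision=$(kubectl_bin get certificate "$certificate" \
                -o 'jsonpath={.status.revision}')
            [ "$new_revision" == "$((revision + 1))" ] && break
            sleep 1 # assumption: the trace only shows the immediate-success path
        done
    }

After the revision bump the suite sleeps 10 seconds and re-enters wait_for_running, giving the operator time to react to the re-issued secret contents.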
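wait_for_running itself follows the pattern traced above for rs0 (and next for cfg and mongos): wait for each pod in order, consult the PSMDB spec via JSONPath to learn whether the last member is replaced by an arbiter (plus whether nonvoting or hidden members exist), then optionally block until the custom resource reports ready. A simplified sketch; wait_pod's body, the arbiter pod name, and the readiness poll interval are assumptions, since those parts run under set +x:

    wait_pod() {
        # assumption: the suite's dotted "waiting for pod/<name> to be ready"
        # output wraps something equivalent to this
        kubectl_bin wait --for=condition=Ready "pod/$1" --timeout=300s
    }

    wait_for_running() {
        local name=$1 # e.g. some-name-rs0
        local pods=$2
        local check_cluster_readyness=${3:-true}
        local rs_name=${name##*-}
        local cluster_name=${name%-*}
        local last_pod=$((pods - 1))

        for i in $(seq 0 $last_pod); do
            if [ "$i" -eq "$last_pod" ]; then
                # when arbiter.enabled is true, the last member is an arbiter
                local arbiter
                arbiter=$(kubectl_bin get psmdb "$cluster_name" \
                    -o "jsonpath={.spec.replsets[?(@.name==\"${rs_name}\")].arbiter.enabled}")
                if [ "$arbiter" == "true" ]; then
                    wait_pod "${name}-arbiter-0" # assumption: arbiter pod name
                    continue
                fi
            fi
            wait_pod "${name}-${i}"
        done

        sleep 10
        if [ "$check_cluster_readyness" == "true" ]; then
            echo -n 'Waiting for cluster readyness' # spelling as emitted by the suite
            until [ "$(kubectl_bin get psmdb "$cluster_name" \
                    -o 'jsonpath={.status.state}')" == "ready" ]; do
                echo -n .
                sleep 1 # assumption: the real poll runs under set +x
            done
            echo
        fi
    }

The dotted lines in the trace are that final poll; the much longer run of dots after the renewal reflects the operator restarting every replica set, config server, and mongos pod onto the re-issued certificates.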
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bJMf2PeQaG +++ mktemp ++ local LAST_ERR=/tmp/tmp.yoV5UCK4rC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bJMf2PeQaG ++ cat /tmp/tmp.yoV5UCK4rC ++ rm /tmp/tmp.bJMf2PeQaG /tmp/tmp.yoV5UCK4rC ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HVnp7rItUE +++ mktemp ++ local LAST_ERR=/tmp/tmp.3yfoKGhKoo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HVnp7rItUE ++ cat /tmp/tmp.3yfoKGhKoo ++ rm /tmp/tmp.HVnp7rItUE /tmp/tmp.3yfoKGhKoo ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BBlu5DpJ72 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yNcLAUozKI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BBlu5DpJ72 ++ cat /tmp/tmp.yNcLAUozKI ++ rm /tmp/tmp.BBlu5DpJ72 /tmp/tmp.yNcLAUozKI ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.blFxxAnAgH +++ mktemp ++ local LAST_ERR=/tmp/tmp.K7kvoOlboh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
-n 1 ']' ++ break ++ cat /tmp/tmp.blFxxAnAgH ++ cat /tmp/tmp.K7kvoOlboh ++ rm /tmp/tmp.blFxxAnAgH /tmp/tmp.K7kvoOlboh ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3zKWKN00AS +++ mktemp ++ local LAST_ERR=/tmp/tmp.9GXRLK0QTt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3zKWKN00AS ++ cat /tmp/tmp.9GXRLK0QTt ++ rm /tmp/tmp.3zKWKN00AS /tmp/tmp.9GXRLK0QTt ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JdV5bZzypp +++ mktemp ++ local LAST_ERR=/tmp/tmp.qOCxtLSxTN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JdV5bZzypp ++ cat /tmp/tmp.qOCxtLSxTN ++ rm /tmp/tmp.JdV5bZzypp /tmp/tmp.qOCxtLSxTN ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ACrqk5LT7E ++ mktemp + local LAST_ERR=/tmp/tmp.bkeGPy840U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ACrqk5LT7E + cat /tmp/tmp.bkeGPy840U + rm /tmp/tmp.ACrqk5LT7E /tmp/tmp.bkeGPy840U + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-ca-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-ca-issuer OK' + set +o xtrace [2025-10-31T16:09:11+0000] compare_kubectl: issuer/some-name-psmdb-ca-issuer OK + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ziYEYfFFhz ++ mktemp + local LAST_ERR=/tmp/tmp.y80u59fqAS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ziYEYfFFhz + cat /tmp/tmp.y80u59fqAS + rm /tmp/tmp.ziYEYfFFhz /tmp/tmp.y80u59fqAS + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.hnuVMpDSnH/issuer_some-name-psmdb-issuer.yml + log 'compare_kubectl: issuer/some-name-psmdb-issuer OK' + set +o xtrace [2025-10-31T16:09:11+0000] compare_kubectl: issuer/some-name-psmdb-issuer OK + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.gTDLmapDMS ++ mktemp + local LAST_ERR=/tmp/tmp.t3X5zcYr9C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gTDLmapDMS + cat /tmp/tmp.t3X5zcYr9C + rm /tmp/tmp.gTDLmapDMS /tmp/tmp.t3X5zcYr9C + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl.yml + log 'compare_kubectl: certificate/some-name-ssl OK' + set +o xtrace [2025-10-31T16:09:12+0000] compare_kubectl: certificate/some-name-ssl OK + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local new_result=/tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. 
| select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.wI9azTdJQL ++ mktemp + local LAST_ERR=/tmp/tmp.899zGyl83W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wI9azTdJQL + cat /tmp/tmp.899zGyl83W + rm /tmp/tmp.wI9azTdJQL /tmp/tmp.899zGyl83W + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.hnuVMpDSnH/certificate_some-name-ssl-internal.yml + log 'compare_kubectl: certificate/some-name-ssl-internal OK' + set +o xtrace [2025-10-31T16:09:14+0000] compare_kubectl: certificate/some-name-ssl-internal OK + desc 'disable TLS' + set +o xtrace ----------------------------------------------------------------------------------- disable TLS ----------------------------------------------------------------------------------- + pause_cluster some-name + local cluster_name=some-name + echo 'Pausing cluster some-name' Pausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' ++ mktemp + local LAST_OUT=/tmp/tmp.UkMgk7ptvc ++ mktemp + 
local LAST_ERR=/tmp/tmp.vbXtNsOwYE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UkMgk7ptvc perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.vbXtNsOwYE + rm /tmp/tmp.UkMgk7ptvc /tmp/tmp.vbXtNsOwYE + return 0 + wait_for_cluster_state some-name paused + local cluster_name=some-name + local target_state=paused + echo -n 'Waiting for psmdb/some-name to reach paused state' Waiting for psmdb/some-name to reach paused state+ local timeout=0 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0eV7KabOlk +++ mktemp ++ local LAST_ERR=/tmp/tmp.XBYugU4RZ6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0eV7KabOlk ++ cat /tmp/tmp.XBYugU4RZ6 ++ rm /tmp/tmp.0eV7KabOlk /tmp/tmp.XBYugU4RZ6 ++ return 0 + [[ ready =~ paused ]] + sleep 1 + timeout=1 + echo -n . .+ [[ 1 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7FAWp6Dcbt +++ mktemp ++ local LAST_ERR=/tmp/tmp.g1EA3C6nMg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7FAWp6Dcbt ++ cat /tmp/tmp.g1EA3C6nMg ++ rm /tmp/tmp.7FAWp6Dcbt /tmp/tmp.g1EA3C6nMg ++ return 0 + [[ ready =~ paused ]] + sleep 1 + timeout=2 + echo -n . .+ [[ 2 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NbhILjuDPx +++ mktemp ++ local LAST_ERR=/tmp/tmp.D8k5tZB8zz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NbhILjuDPx ++ cat /tmp/tmp.D8k5tZB8zz ++ rm /tmp/tmp.NbhILjuDPx /tmp/tmp.D8k5tZB8zz ++ return 0 + [[ ready =~ paused ]] + sleep 1 + timeout=3 + echo -n . .+ [[ 3 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zx2zuCeXst +++ mktemp ++ local LAST_ERR=/tmp/tmp.47LzPjczlg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zx2zuCeXst ++ cat /tmp/tmp.47LzPjczlg ++ rm /tmp/tmp.zx2zuCeXst /tmp/tmp.47LzPjczlg ++ return 0 + [[ stopping =~ paused ]] + sleep 1 + timeout=4 + echo -n . .+ [[ 4 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7wpxL12pze +++ mktemp ++ local LAST_ERR=/tmp/tmp.EZWw2bhhrW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7wpxL12pze ++ cat /tmp/tmp.EZWw2bhhrW ++ rm /tmp/tmp.7wpxL12pze /tmp/tmp.EZWw2bhhrW ++ return 0 + [[ stopping =~ paused ]] + sleep 1 + timeout=5 + echo -n . 
.+ [[ 5 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BtD1nxAvPo +++ mktemp ++ local LAST_ERR=/tmp/tmp.x20RlRKOct ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BtD1nxAvPo ++ cat /tmp/tmp.x20RlRKOct ++ rm /tmp/tmp.BtD1nxAvPo /tmp/tmp.x20RlRKOct ++ return 0 + [[ stopping =~ paused ]] + sleep 1 + timeout=6 + echo -n .
(the identical one-second poll repeats, each pass through fresh mktemp capture files, until timeout=59; every check reports "stopping" except a single transient "error" at the timeout=49 check)
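Every "++ mktemp ... ++ return 0" block in the polling trace above is one invocation of the suite's kubectl_bin retry wrapper: it captures stdout and stderr into throwaway temp files, tries the kubectl call up to three times (seq 0 2), and replays the captured output once a pass succeeds. A minimal sketch of that wrapper, reconstructed from the trace; the failure path (pausing timeout seconds before retrying) is an assumption, since this log only shows successful first attempts:

    kubectl_bin() {
        # Capture each attempt's stdout/stderr in throwaway files, as the trace shows
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break
            sleep "$timeout"   # assumed backoff between the three attempts
        done
        # Replay captured output so callers see normal kubectl output
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm -f "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }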
.+ [[ 59 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LcSyc35HZI +++ mktemp ++ local LAST_ERR=/tmp/tmp.IgXbZlPeUp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LcSyc35HZI ++ cat /tmp/tmp.IgXbZlPeUp ++ rm /tmp/tmp.LcSyc35HZI /tmp/tmp.IgXbZlPeUp ++ return 0 + [[ stopping =~ paused ]] + sleep 1 + timeout=60 + echo -n . .+ [[ 60 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VTjhFmftD9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ejj2mQEcuk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VTjhFmftD9 ++ cat /tmp/tmp.Ejj2mQEcuk ++ rm /tmp/tmp.VTjhFmftD9 /tmp/tmp.Ejj2mQEcuk ++ return 0 + [[ stopping =~ paused ]] + sleep 1 + timeout=61 + echo -n . .+ [[ 61 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y2JEW1orPO +++ mktemp ++ local LAST_ERR=/tmp/tmp.cpwCzVvmkp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y2JEW1orPO ++ cat /tmp/tmp.cpwCzVvmkp ++ rm /tmp/tmp.y2JEW1orPO /tmp/tmp.cpwCzVvmkp ++ return 0 + [[ paused =~ paused ]] + echo + log 'psmdb/some-name is paused: OK' + set +o xtrace [2025-10-31T16:11:16+0000] psmdb/some-name is paused: OK + disable_tls some-name + local cluster_name=some-name + echo 'Disabling TLS for cluster some-name' Disabling TLS for cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' ++ mktemp + local LAST_OUT=/tmp/tmp.EWnuPm0RaH ++ mktemp + local LAST_ERR=/tmp/tmp.tg2fdkzh3j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EWnuPm0RaH perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.tg2fdkzh3j + rm /tmp/tmp.EWnuPm0RaH /tmp/tmp.tg2fdkzh3j + return 0 + unpause_cluster some-name + local cluster_name=some-name + echo 'Unpausing cluster some-name' Unpausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' ++ mktemp + local LAST_OUT=/tmp/tmp.Sql67UUE9q ++ mktemp + local LAST_ERR=/tmp/tmp.bSFLzqekpa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Sql67UUE9q perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.bSFLzqekpa + rm /tmp/tmp.Sql67UUE9q /tmp/tmp.bSFLzqekpa + return 0 + wait_for_cluster_state some-name ready + local cluster_name=some-name + local target_state=ready + echo -n 'Waiting for psmdb/some-name to reach ready state' Waiting for psmdb/some-name to reach ready state+ local timeout=0 ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0RbN3APbOl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qci5OsGSdV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0RbN3APbOl ++ cat /tmp/tmp.Qci5OsGSdV ++ rm /tmp/tmp.0RbN3APbOl /tmp/tmp.Qci5OsGSdV ++ return 0 + [[ paused =~ ready ]] + sleep 1 + timeout=1 + echo -n . .+ [[ 1 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qk0zkNXzQc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kwai2FI5TG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qk0zkNXzQc ++ cat /tmp/tmp.Kwai2FI5TG ++ rm /tmp/tmp.Qk0zkNXzQc /tmp/tmp.Kwai2FI5TG ++ return 0 + [[ paused =~ ready ]] + sleep 1 + timeout=2 + echo -n . .+ [[ 2 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ifvrJiUOHz +++ mktemp ++ local LAST_ERR=/tmp/tmp.lMXWAqfsmu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ifvrJiUOHz ++ cat /tmp/tmp.lMXWAqfsmu ++ rm /tmp/tmp.ifvrJiUOHz /tmp/tmp.lMXWAqfsmu ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=3 + echo -n . .+ [[ 3 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZT4uuRAt4f +++ mktemp ++ local LAST_ERR=/tmp/tmp.B3i3heintP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZT4uuRAt4f ++ cat /tmp/tmp.B3i3heintP ++ rm /tmp/tmp.ZT4uuRAt4f /tmp/tmp.B3i3heintP ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=4 + echo -n . .+ [[ 4 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2QCPYMYFkE +++ mktemp ++ local LAST_ERR=/tmp/tmp.KHUfN036P2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2QCPYMYFkE ++ cat /tmp/tmp.KHUfN036P2 ++ rm /tmp/tmp.2QCPYMYFkE /tmp/tmp.KHUfN036P2 ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=5 + echo -n . .+ [[ 5 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.puhECI8Wp8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.j7c4jCTm58 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.puhECI8Wp8 ++ cat /tmp/tmp.j7c4jCTm58 ++ rm /tmp/tmp.puhECI8Wp8 /tmp/tmp.j7c4jCTm58 ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=6 + echo -n . 
.+ [[ 6 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.62Iv6IfTEG +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vcdmxn30dO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.62Iv6IfTEG ++ cat /tmp/tmp.Vcdmxn30dO ++ rm /tmp/tmp.62Iv6IfTEG /tmp/tmp.Vcdmxn30dO ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=7 + echo -n . .+ [[ 7 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.03I99WrMzC +++ mktemp ++ local LAST_ERR=/tmp/tmp.hRdyYLegLj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.03I99WrMzC ++ cat /tmp/tmp.hRdyYLegLj ++ rm /tmp/tmp.03I99WrMzC /tmp/tmp.hRdyYLegLj ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=8 + echo -n . .+ [[ 8 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7EpAuTcCKg +++ mktemp ++ local LAST_ERR=/tmp/tmp.luGbKU1pjs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7EpAuTcCKg ++ cat /tmp/tmp.luGbKU1pjs ++ rm /tmp/tmp.7EpAuTcCKg /tmp/tmp.luGbKU1pjs ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=9 + echo -n . .+ [[ 9 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zOvmhcsBTa +++ mktemp ++ local LAST_ERR=/tmp/tmp.DT6S6VkMxz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zOvmhcsBTa ++ cat /tmp/tmp.DT6S6VkMxz ++ rm /tmp/tmp.zOvmhcsBTa /tmp/tmp.DT6S6VkMxz ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=10 + echo -n . .+ [[ 10 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.chlEvcJxJE +++ mktemp ++ local LAST_ERR=/tmp/tmp.T7hjIkbMDx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.chlEvcJxJE ++ cat /tmp/tmp.T7hjIkbMDx ++ rm /tmp/tmp.chlEvcJxJE /tmp/tmp.T7hjIkbMDx ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=11 + echo -n . .+ [[ 11 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.moGcAePqJG +++ mktemp ++ local LAST_ERR=/tmp/tmp.siTynBImiq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.moGcAePqJG ++ cat /tmp/tmp.siTynBImiq ++ rm /tmp/tmp.moGcAePqJG /tmp/tmp.siTynBImiq ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=12 + echo -n . 
.+ [[ 12 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fUnjzGDd2R +++ mktemp ++ local LAST_ERR=/tmp/tmp.7aCW33gUYC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fUnjzGDd2R ++ cat /tmp/tmp.7aCW33gUYC ++ rm /tmp/tmp.fUnjzGDd2R /tmp/tmp.7aCW33gUYC ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=13 + echo -n . .+ [[ 13 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UcLSL6er7o +++ mktemp ++ local LAST_ERR=/tmp/tmp.L4ALZnn4KJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UcLSL6er7o ++ cat /tmp/tmp.L4ALZnn4KJ ++ rm /tmp/tmp.UcLSL6er7o /tmp/tmp.L4ALZnn4KJ ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=14 + echo -n . .+ [[ 14 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pPh0cIfXqE +++ mktemp ++ local LAST_ERR=/tmp/tmp.kHYPgEa9Va ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pPh0cIfXqE ++ cat /tmp/tmp.kHYPgEa9Va ++ rm /tmp/tmp.pPh0cIfXqE /tmp/tmp.kHYPgEa9Va ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=15 + echo -n . .+ [[ 15 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9Sc8vrGLmd +++ mktemp ++ local LAST_ERR=/tmp/tmp.s7slX7Pt6e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9Sc8vrGLmd ++ cat /tmp/tmp.s7slX7Pt6e ++ rm /tmp/tmp.9Sc8vrGLmd /tmp/tmp.s7slX7Pt6e ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=16 + echo -n . .+ [[ 16 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cqpnyzFAXl +++ mktemp ++ local LAST_ERR=/tmp/tmp.mM7jIrXEeA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cqpnyzFAXl ++ cat /tmp/tmp.mM7jIrXEeA ++ rm /tmp/tmp.cqpnyzFAXl /tmp/tmp.mM7jIrXEeA ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=17 + echo -n . .+ [[ 17 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.47OdqKnl9R +++ mktemp ++ local LAST_ERR=/tmp/tmp.atxyvkXrOT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.47OdqKnl9R ++ cat /tmp/tmp.atxyvkXrOT ++ rm /tmp/tmp.47OdqKnl9R /tmp/tmp.atxyvkXrOT ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=18 + echo -n . 
.+ [[ 18 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.amSUFuzNLv +++ mktemp ++ local LAST_ERR=/tmp/tmp.TQtjDL52wU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.amSUFuzNLv ++ cat /tmp/tmp.TQtjDL52wU ++ rm /tmp/tmp.amSUFuzNLv /tmp/tmp.TQtjDL52wU ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=19 + echo -n . .+ [[ 19 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AcpNWmAQct +++ mktemp ++ local LAST_ERR=/tmp/tmp.zuFo5lMpFv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AcpNWmAQct ++ cat /tmp/tmp.zuFo5lMpFv ++ rm /tmp/tmp.AcpNWmAQct /tmp/tmp.zuFo5lMpFv ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=20 + echo -n . .+ [[ 20 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ya9tTN9pZ9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ub9cVQpsoP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ya9tTN9pZ9 ++ cat /tmp/tmp.ub9cVQpsoP ++ rm /tmp/tmp.Ya9tTN9pZ9 /tmp/tmp.ub9cVQpsoP ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=21 + echo -n . .+ [[ 21 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yMKUqzWPXx +++ mktemp ++ local LAST_ERR=/tmp/tmp.YE5XLiiMb4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yMKUqzWPXx ++ cat /tmp/tmp.YE5XLiiMb4 ++ rm /tmp/tmp.yMKUqzWPXx /tmp/tmp.YE5XLiiMb4 ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=22 + echo -n . .+ [[ 22 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6rBnKBgPA2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.JmSdUkvD4a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6rBnKBgPA2 ++ cat /tmp/tmp.JmSdUkvD4a ++ rm /tmp/tmp.6rBnKBgPA2 /tmp/tmp.JmSdUkvD4a ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=23 + echo -n . .+ [[ 23 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KOxv4fS9Xs +++ mktemp ++ local LAST_ERR=/tmp/tmp.AYVXvnnBke ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KOxv4fS9Xs ++ cat /tmp/tmp.AYVXvnnBke ++ rm /tmp/tmp.KOxv4fS9Xs /tmp/tmp.AYVXvnnBke ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=24 + echo -n . 
(1-second status polls repeated here: timeout counter 24 through 41, .status.state still "initializing"; identical iterations omitted)
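
# What the surrounding trace is doing: the suite polls the psmdb custom
# resource once per second until .status.state matches "ready", giving up
# after 1500 polls. A minimal standalone sketch of that wait loop, assuming
# kubectl access to the same cluster (the function name wait_for_ready and
# the plain kubectl call in place of the kubectl_bin retry wrapper are this
# sketch's own choices, not the suite's exact helper):

wait_for_ready() {
    local name=$1 waited=0 state
    while true; do
        state=$(kubectl get psmdb "$name" -o 'jsonpath={.status.state}')
        [[ $state =~ ready ]] && break
        if [[ $waited -gt 1500 ]]; then
            echo "psmdb/$name never became ready (last state: $state)" >&2
            return 1
        fi
        sleep 1
        waited=$((waited + 1))
        echo -n .    # same progress dots as in the trace above
    done
    echo "psmdb/$name is ready"
}

# usage: wait_for_ready some-name
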
.+ [[ 42 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0RPNswjSVg +++ mktemp ++ local LAST_ERR=/tmp/tmp.rmhid3bhBY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0RPNswjSVg ++ cat /tmp/tmp.rmhid3bhBY ++ rm /tmp/tmp.0RPNswjSVg /tmp/tmp.rmhid3bhBY ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=43 + echo -n . .+ [[ 43 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9zIPgjmgPt +++ mktemp ++ local LAST_ERR=/tmp/tmp.BzaeLNTbuP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9zIPgjmgPt ++ cat /tmp/tmp.BzaeLNTbuP ++ rm /tmp/tmp.9zIPgjmgPt /tmp/tmp.BzaeLNTbuP ++ return 0 + [[ initializing =~ ready ]] + sleep 1 + timeout=44 + echo -n . .+ [[ 44 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.keR8fGkAZ8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.a2qGgoEIPw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.keR8fGkAZ8 ++ cat /tmp/tmp.a2qGgoEIPw ++ rm /tmp/tmp.keR8fGkAZ8 /tmp/tmp.a2qGgoEIPw ++ return 0 + [[ ready =~ ready ]] + echo + log 'psmdb/some-name is ready: OK' + set +o xtrace [2025-10-31T16:12:49+0000] psmdb/some-name is ready: OK + compare_kubectl statefulset/some-name-rs0 -tls-disabled skip_generation_check + local resource=statefulset/some-name-rs0 + local postfix=-tls-disabled + local skip_generation_check=skip_generation_check + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml + local new_result=/tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Oxze723T4H ++ mktemp + local LAST_ERR=/tmp/tmp.yjUWX8f2hA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Oxze723T4H + cat /tmp/tmp.yjUWX8f2hA + rm /tmp/tmp.Oxze723T4H /tmp/tmp.yjUWX8f2hA + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml == */cronjob* ]] + '[' -n skip_generation_check ']' + yq -i eval 'del(.metadata.generation)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml /tmp/tmp.hnuVMpDSnH/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-10-31T16:12:50+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg -tls-disabled skip_generation_check + local resource=statefulset/some-name-cfg + local postfix=-tls-disabled + local skip_generation_check=skip_generation_check + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml + local new_result=/tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.qBPDKfu7ti ++ mktemp + local LAST_ERR=/tmp/tmp.XS0kaSH0hz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qBPDKfu7ti + cat /tmp/tmp.XS0kaSH0hz + rm /tmp/tmp.qBPDKfu7ti /tmp/tmp.XS0kaSH0hz + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml == */cronjob* ]] + '[' -n skip_generation_check ']' + yq -i eval 'del(.metadata.generation)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml /tmp/tmp.hnuVMpDSnH/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-10-31T16:12:51+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos -tls-disabled skip_generation_check + local resource=statefulset/some-name-mongos + local postfix=-tls-disabled + local skip_generation_check=skip_generation_check + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml + local new_result=/tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-14931", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.xEJEfSN5TJ ++ mktemp + local LAST_ERR=/tmp/tmp.YVbMfFHHve + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xEJEfSN5TJ + cat /tmp/tmp.YVbMfFHHve + rm /tmp/tmp.xEJEfSN5TJ /tmp/tmp.YVbMfFHHve + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml == */cronjob* ]] + '[' -n skip_generation_check ']' + yq -i eval 'del(.metadata.generation)' /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml /tmp/tmp.hnuVMpDSnH/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-10-31T16:12:53+0000] compare_kubectl: statefulset/some-name-mongos OK + destroy tls-issue-cert-manager-14931 + local namespace=tls-issue-cert-manager-14931 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.0NKyOLKKqu +++ mktemp ++ local LAST_ERR=/tmp/tmp.GM2uae9Xro ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0NKyOLKKqu ++ cat /tmp/tmp.GM2uae9Xro No resources found in tls-issue-cert-manager-14931 namespace. 
++ rm /tmp/tmp.0NKyOLKKqu /tmp/tmp.GM2uae9Xro ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.fepC9cmDa2 ++ mktemp + local LAST_ERR=/tmp/tmp.G8cIbkmBd1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fepC9cmDa2 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.G8cIbkmBd1 + rm /tmp/tmp.fepC9cmDa2 /tmp/tmp.G8cIbkmBd1 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.1SZmXqehRT ++ mktemp + local LAST_ERR=/tmp/tmp.qwmM7YupM0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1SZmXqehRT + cat /tmp/tmp.qwmM7YupM0 + rm /tmp/tmp.1SZmXqehRT /tmp/tmp.qwmM7YupM0 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WmWfx2ypie ++ mktemp + local LAST_ERR=/tmp/tmp.3jFM6MCwq9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.WmWfx2ypie + cat /tmp/tmp.3jFM6MCwq9 + rm /tmp/tmp.WmWfx2ypie /tmp/tmp.3jFM6MCwq9 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.kG45TRXkMR ++ mktemp + local LAST_ERR=/tmp/tmp.d40MSDROid + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kG45TRXkMR + cat /tmp/tmp.d40MSDROid + rm /tmp/tmp.kG45TRXkMR /tmp/tmp.d40MSDROid + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.71VjwKbyEb ++ mktemp + local LAST_ERR=/tmp/tmp.bwo9RMmlJ9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2100/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.71VjwKbyEb clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.bwo9RMmlJ9 + rm /tmp/tmp.71VjwKbyEb /tmp/tmp.bwo9RMmlJ9 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.CH2rnSCmgV ++ mktemp + local LAST_ERR=/tmp/tmp.dWztHyZHRS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.CH2rnSCmgV namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted 
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace service "cert-manager-cainjector" deleted from cert-manager namespace service "cert-manager" deleted from cert-manager namespace service "cert-manager-webhook" deleted from cert-manager namespace deployment.apps "cert-manager-cainjector" deleted from cert-manager namespace deployment.apps "cert-manager" deleted from cert-manager namespace deployment.apps "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.dWztHyZHRS Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 
']' + cat /tmp/tmp.CH2rnSCmgV namespace "cert-manager" deleted + cat /tmp/tmp.dWztHyZHRS Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io 
"cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.CH2rnSCmgV + cat /tmp/tmp.dWztHyZHRS Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.CH2rnSCmgV + cat /tmp/tmp.dWztHyZHRS Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error 
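
# Every kubectl_bin invocation in this log wraps kubectl in the retry loop
# visible above: stdout/stderr are captured to mktemp files and the command
# is attempted up to three times. The observed pauses (sleep 0, 4, 8) are
# consistent with sleeping timeout*attempt seconds with timeout=4, but that
# formula is an inference from the trace. A sketch reconstructed under those
# assumptions (kubectl_retry is a name invented here; the suite's real
# helper may differ in details):

kubectl_retry() {
    local out err exit_status=0 timeout=4 i
    out=$(mktemp)
    err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$out" 2>"$err"
        exit_status=$?
        set -e
        [[ $exit_status -eq 0 ]] && break
        sleep $((timeout * i))   # 0s, 4s, 8s across the three attempts
    done
    cat "$out"        # replay captured stdout, as the trace does
    cat "$err" >&2    # replay captured stderr
    rm "$out" "$err"
    return $exit_status
}
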
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps 
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.CH2rnSCmgV /tmp/tmp.dWztHyZHRS + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace tls-issue-cert-manager-14931 + rm -rf /tmp/tmp.hnuVMpDSnH + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.X40ciDdnND ++ mktemp + local LAST_OUT=/tmp/tmp.d0CO6YtUQC ++ mktemp + local LAST_ERR=/tmp/tmp.GYltqMFH0r + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.13yvIDTWcm + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace tls-issue-cert-manager-14931 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator