Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/logs/tls-issue-cert-manager.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + main + create_infra tls-issue-cert-manager-20452 + local ns=tls-issue-cert-manager-20452 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.Yqlvqy5Avf ++ mktemp + local LAST_ERR=/tmp/tmp.K6nmlzKL7p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Yqlvqy5Avf customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.K6nmlzKL7p + rm /tmp/tmp.Yqlvqy5Avf /tmp/tmp.K6nmlzKL7p + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.6DT0d5vs9y ++ mktemp + local LAST_ERR=/tmp/tmp.g8qemJeo3s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6DT0d5vs9y + cat /tmp/tmp.g8qemJeo3s + rm /tmp/tmp.6DT0d5vs9y /tmp/tmp.g8qemJeo3s + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource 
type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.r8xOPqAbGg ++ mktemp + local LAST_ERR=/tmp/tmp.Ki31ThVzwv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r8xOPqAbGg + cat /tmp/tmp.Ki31ThVzwv + rm /tmp/tmp.r8xOPqAbGg /tmp/tmp.Ki31ThVzwv + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.1zUMkhoGRs ++ mktemp + local LAST_ERR=/tmp/tmp.7Vo7JcICTz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1zUMkhoGRs + cat /tmp/tmp.7Vo7JcICTz + rm /tmp/tmp.1zUMkhoGRs /tmp/tmp.7Vo7JcICTz + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.NK9wP614ta ++ mktemp + local LAST_ERR=/tmp/tmp.8Qb9jiLWV7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NK9wP614ta clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.8Qb9jiLWV7 + rm /tmp/tmp.NK9wP614ta /tmp/tmp.8Qb9jiLWV7 + return 0 + check_crd_for_deletion PR-1393-7b414d13 + local git_tag=PR-1393-7b414d13 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1393-7b414d13/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ztC9uM5zJX +++ mktemp ++ local LAST_ERR=/tmp/tmp.ttD9QLgN4f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ztC9uM5zJX ++ cat /tmp/tmp.ttD9QLgN4f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ 
set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ztC9uM5zJX ++ cat /tmp/tmp.ttD9QLgN4f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ztC9uM5zJX ++ cat /tmp/tmp.ttD9QLgN4f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.ztC9uM5zJX ++ cat /tmp/tmp.ttD9QLgN4f Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.ztC9uM5zJX /tmp/tmp.ttD9QLgN4f ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.aOV4wTUe9u + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_ERR=/tmp/tmp.bpC4S2DiUA + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.IDmOgUIhi1 ++ 
mktemp + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.MUVqphNIwS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aOV4wTUe9u + cat /tmp/tmp.bpC4S2DiUA + rm /tmp/tmp.aOV4wTUe9u /tmp/tmp.bpC4S2DiUA + return 0 namespace "cert-manager" deleted namespace "tls-issue-cert-manager-6936" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IDmOgUIhi1 namespace "psmdb-operator" deleted + cat /tmp/tmp.MUVqphNIwS + rm /tmp/tmp.IDmOgUIhi1 /tmp/tmp.MUVqphNIwS + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.oaaADUuBiN ++ mktemp + local LAST_ERR=/tmp/tmp.0TCti7aQ6V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oaaADUuBiN + cat /tmp/tmp.0TCti7aQ6V + rm /tmp/tmp.oaaADUuBiN /tmp/tmp.0TCti7aQ6V + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.8OWSuasFev ++ mktemp + local LAST_ERR=/tmp/tmp.7j4i0klhCq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8OWSuasFev namespace/psmdb-operator created + cat /tmp/tmp.7j4i0klhCq + rm /tmp/tmp.8OWSuasFev /tmp/tmp.7j4i0klhCq + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.cxvLCb08OP +++ mktemp ++ local LAST_ERR=/tmp/tmp.wea0MuRbEq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cxvLCb08OP ++ cat /tmp/tmp.wea0MuRbEq ++ rm /tmp/tmp.cxvLCb08OP /tmp/tmp.wea0MuRbEq ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Avxd6ArGsc ++ mktemp + local LAST_ERR=/tmp/tmp.IrykPhrXrW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Avxd6ArGsc Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster1" modified. 
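Aside on the trace format: the recurring "++ mktemp" / LAST_OUT / LAST_ERR / "seq 0 2" noise above comes from a retry wrapper the test suite puts around kubectl. A minimal sketch of what that wrapper appears to do, reconstructed from this trace rather than taken from the test sources (the function name, variable names and backoff arithmetic are assumptions; only the observable behavior — three attempts, 0s/4s/8s pauses, stdout/stderr captured to temp files and replayed — is taken from the log):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    # up to three attempts, matching the "sleep 0", "sleep 4", "sleep 8" seen above
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))
    done
    cat "$LAST_OUT"        # captured stdout is replayed into the log
    cat "$LAST_ERR" >&2    # captured stderr likewise
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}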
+ cat /tmp/tmp.IrykPhrXrW + rm /tmp/tmp.Avxd6ArGsc /tmp/tmp.IrykPhrXrW + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.gDKFJq01Yx ++ mktemp + local LAST_ERR=/tmp/tmp.LDmu1ce3r7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gDKFJq01Yx customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.LDmu1ce3r7 + rm /tmp/tmp.gDKFJq01Yx /tmp/tmp.LDmu1ce3r7 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.t9aaAPfjbR ++ mktemp + local LAST_ERR=/tmp/tmp.HPcyioy1bW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t9aaAPfjbR clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.HPcyioy1bW + rm /tmp/tmp.t9aaAPfjbR /tmp/tmp.HPcyioy1bW + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1393-7b414d13") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.yNxxezNjfE ++ mktemp + local LAST_ERR=/tmp/tmp.F324zeKpuY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yNxxezNjfE deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.F324zeKpuY + rm /tmp/tmp.yNxxezNjfE /tmp/tmp.F324zeKpuY + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.11kWDj4HsQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.JcY6fGmWbF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.11kWDj4HsQ ++ cat /tmp/tmp.JcY6fGmWbF ++ rm /tmp/tmp.11kWDj4HsQ /tmp/tmp.JcY6fGmWbF ++ return 0 + wait_pod percona-server-mongodb-operator-f94797cf7-j47vq + local pod=percona-server-mongodb-operator-f94797cf7-j47vq + set +o xtrace waiting for pod/percona-server-mongodb-operator-f94797cf7-j47vq to be ready.OK + create_namespace tls-issue-cert-manager-20452 + local namespace=tls-issue-cert-manager-20452 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old 
namespaces tls-issue-cert-manager-20452' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces tls-issue-cert-manager-20452 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace tls-issue-cert-manager-20452 --ignore-not-found + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.YKvQ6O7Wd8 + local LAST_OUT=/tmp/tmp.jfOcB5WERA ++ mktemp ++ mktemp + xargs kubectl delete ns + local LAST_ERR=/tmp/tmp.1BY0qjklXB + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.imLi6CKj9z + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace tls-issue-cert-manager-20452 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jfOcB5WERA + cat /tmp/tmp.imLi6CKj9z + rm /tmp/tmp.jfOcB5WERA /tmp/tmp.imLi6CKj9z + return 0 + kubectl_bin wait --for=delete namespace tls-issue-cert-manager-20452 ++ mktemp + local LAST_OUT=/tmp/tmp.NOML3npUrw ++ mktemp + local LAST_ERR=/tmp/tmp.RYckMtZ89O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace tls-issue-cert-manager-20452 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YKvQ6O7Wd8 + cat /tmp/tmp.1BY0qjklXB + rm /tmp/tmp.YKvQ6O7Wd8 /tmp/tmp.1BY0qjklXB + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NOML3npUrw + cat /tmp/tmp.RYckMtZ89O + rm /tmp/tmp.NOML3npUrw /tmp/tmp.RYckMtZ89O + return 0 + desc 'create namespace tls-issue-cert-manager-20452' + set +o xtrace ----------------------------------------------------------------------------------- create namespace tls-issue-cert-manager-20452 ----------------------------------------------------------------------------------- + kubectl_bin create namespace tls-issue-cert-manager-20452 ++ mktemp + local LAST_OUT=/tmp/tmp.xajkG8UM9b ++ mktemp + local LAST_ERR=/tmp/tmp.YtaZUefFgW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace tls-issue-cert-manager-20452 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xajkG8UM9b namespace/tls-issue-cert-manager-20452 created + cat /tmp/tmp.YtaZUefFgW + rm /tmp/tmp.xajkG8UM9b /tmp/tmp.YtaZUefFgW + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.RWbA0CqthM +++ mktemp ++ local LAST_ERR=/tmp/tmp.pK8FzNFh2l ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RWbA0CqthM ++ cat /tmp/tmp.pK8FzNFh2l ++ rm /tmp/tmp.RWbA0CqthM /tmp/tmp.pK8FzNFh2l ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster1 --namespace=tls-issue-cert-manager-20452 ++ mktemp + local LAST_OUT=/tmp/tmp.CtXduMrua1 ++ mktemp + local LAST_ERR=/tmp/tmp.65P9MQYqT8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster1 --namespace=tls-issue-cert-manager-20452 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CtXduMrua1 Context 
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1393-7b414d13-23-cluster1" modified. + cat /tmp/tmp.65P9MQYqT8 + rm /tmp/tmp.CtXduMrua1 /tmp/tmp.65P9MQYqT8 + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.kYHjn7Cvlu ++ mktemp + local LAST_ERR=/tmp/tmp.qKVWkyQjDO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kYHjn7Cvlu namespace/cert-manager created + cat /tmp/tmp.qKVWkyQjDO + rm /tmp/tmp.kYHjn7Cvlu /tmp/tmp.qKVWkyQjDO + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.GFQzOBrz5A ++ mktemp + local LAST_ERR=/tmp/tmp.qspNYwplHS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GFQzOBrz5A namespace/cert-manager labeled + cat /tmp/tmp.qspNYwplHS + rm /tmp/tmp.GFQzOBrz5A /tmp/tmp.qspNYwplHS + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.DH9KIP3fdm ++ mktemp + local LAST_ERR=/tmp/tmp.rYsMqAvWsB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DH9KIP3fdm namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created configmap/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests 
unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.rYsMqAvWsB Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
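Note on the cert-manager bring-up just above: the manifest is applied with --validate=false, and the test then relies on a pod readiness wait plus a fixed "sleep 120" (visible a few lines below) before issuing certificates. A hedged sketch of an alternative readiness check, assuming a locally installed cmctl binary (not the cmctl Deployment created later in this log):

# wait until the cert-manager pods report Ready
kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=Ready --timeout=300s
# then poll the cert-manager webhook/API until it actually accepts requests
cmctl check api --wait=2m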
+ rm /tmp/tmp.DH9KIP3fdm /tmp/tmp.rYsMqAvWsB + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.ZzhCe9Cp25 ++ mktemp + local LAST_ERR=/tmp/tmp.R5YGrHdGtg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZzhCe9Cp25 pod/cert-manager-6b8456bfd4-98jxm condition met pod/cert-manager-cainjector-79495bdbc8-5skhg condition met pod/cert-manager-webhook-56fc7669b6-dn46t condition met + cat /tmp/tmp.R5YGrHdGtg + rm /tmp/tmp.ZzhCe9Cp25 /tmp/tmp.R5YGrHdGtg + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.SrJWpSxLwQ ++ mktemp + local LAST_ERR=/tmp/tmp.TzKsOli6i5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SrJWpSxLwQ secret/some-users created + cat /tmp/tmp.TzKsOli6i5 + rm /tmp/tmp.SrJWpSxLwQ /tmp/tmp.TzKsOli6i5 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.38KywuBHZH ++ mktemp + local LAST_ERR=/tmp/tmp.tCuDZg0R4o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.38KywuBHZH deployment.apps/psmdb-client created + cat /tmp/tmp.tCuDZg0R4o + rm /tmp/tmp.38KywuBHZH /tmp/tmp.tCuDZg0R4o + return 0 + deploy_cmctl + local service_account=cmctl + /usr/bin/sed -e s/percona-server-mongodb-operator/cmctl/g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/rbac.yaml + yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.dede1UgFYu ++ mktemp + local LAST_ERR=/tmp/tmp.h17qozDYYx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dede1UgFYu role.rbac.authorization.k8s.io/cmctl created serviceaccount/cmctl created rolebinding.rbac.authorization.k8s.io/service-account-cmctl created + cat /tmp/tmp.h17qozDYYx + rm /tmp/tmp.dede1UgFYu /tmp/tmp.h17qozDYYx + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/cmctl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.rCWeic4yR8 ++ mktemp + local LAST_ERR=/tmp/tmp.B87mkjNwS4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/conf/cmctl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rCWeic4yR8 deployment.apps/cmctl created + cat /tmp/tmp.B87mkjNwS4 
+ rm /tmp/tmp.rCWeic4yR8 /tmp/tmp.B87mkjNwS4 + return 0 + cluster=some-name + desc 'create first PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + local LAST_OUT=/tmp/tmp.9bcjUMATfR + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1393-7b414d13"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.qsSlMHVQHQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9bcjUMATfR perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.qsSlMHVQHQ + rm /tmp/tmp.9bcjUMATfR /tmp/tmp.qsSlMHVQHQ + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready..............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CO4K5TBkU7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.WtnQ9IPoN1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CO4K5TBkU7 ++ cat /tmp/tmp.WtnQ9IPoN1 ++ rm /tmp/tmp.CO4K5TBkU7 /tmp/tmp.WtnQ9IPoN1 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready..............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HbvI6FJrAd +++ mktemp ++ local LAST_ERR=/tmp/tmp.gafCgwxfak ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HbvI6FJrAd ++ cat /tmp/tmp.gafCgwxfak ++ rm /tmp/tmp.HbvI6FJrAd /tmp/tmp.gafCgwxfak ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................ + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pAF21fwPBv +++ mktemp ++ local LAST_ERR=/tmp/tmp.H5M9WXEnu2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pAF21fwPBv ++ cat /tmp/tmp.H5M9WXEnu2 ++ rm /tmp/tmp.pAF21fwPBv /tmp/tmp.H5M9WXEnu2 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TPiHWDgUa7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3NcUB8QNeJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TPiHWDgUa7 ++ cat /tmp/tmp.3NcUB8QNeJ ++ rm /tmp/tmp.TPiHWDgUa7 /tmp/tmp.3NcUB8QNeJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HMCyPyYdGv +++ mktemp ++ local LAST_ERR=/tmp/tmp.i0XK0tuSpn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HMCyPyYdGv ++ cat /tmp/tmp.i0XK0tuSpn ++ rm /tmp/tmp.HMCyPyYdGv /tmp/tmp.i0XK0tuSpn ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local 
pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ElGO2zOJ5X +++ mktemp ++ local LAST_ERR=/tmp/tmp.Crny2QMQcM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ElGO2zOJ5X ++ cat /tmp/tmp.Crny2QMQcM ++ rm /tmp/tmp.ElGO2zOJ5X /tmp/tmp.Crny2QMQcM ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.dlwSzPcVmt ++ mktemp + local LAST_ERR=/tmp/tmp.G0w9MuxwHF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dlwSzPcVmt + cat /tmp/tmp.G0w9MuxwHF + rm /tmp/tmp.dlwSzPcVmt /tmp/tmp.G0w9MuxwHF + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.HnIpXBjmoS ++ mktemp + local LAST_ERR=/tmp/tmp.jPP0CsTHeW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HnIpXBjmoS + cat /tmp/tmp.jPP0CsTHeW + rm /tmp/tmp.HnIpXBjmoS /tmp/tmp.jPP0CsTHeW + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos + local resource=statefulset/some-name-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.cDSf8TGFpt ++ mktemp + local LAST_ERR=/tmp/tmp.ClrTmE1HyT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cDSf8TGFpt + cat /tmp/tmp.ClrTmE1HyT + rm /tmp/tmp.cDSf8TGFpt /tmp/tmp.ClrTmE1HyT + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + desc 'check if certificates issued with certmanager' + set +o xtrace ----------------------------------------------------------------------------------- check if certificates issued with certmanager ----------------------------------------------------------------------------------- + check_tls_secret some-name-ssl + local secret_name=some-name-ssl + check_secret_data_key some-name-ssl ca.crt + local secret_name=some-name-ssl + local data_key=ca.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["ca.crt"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L1Ix4yLCUh +++ mktemp ++ local LAST_ERR=/tmp/tmp.LUgMSdG1kr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L1Ix4yLCUh ++ cat /tmp/tmp.LUgMSdG1kr ++ rm /tmp/tmp.L1Ix4yLCUh /tmp/tmp.LUgMSdG1kr ++ return 0 + 
secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrakNDQWVLZ0F3SUJBZ0lRUVFFQW45TkgzY3hEK3lZNjNSMkgvVEFOQmdrcWhraUc5dzBCQVFzRkFEQVgKTVJVd0V3WURWUVFERXd4emIyMWxMVzVoYldVdFkyRXdIaGNOTWpRd05URXdNVEUxTnpFM1doY05NalV3TlRFdwpNVEUxTnpFM1dqQVhNUlV3RXdZRFZRUURFd3h6YjIxbExXNWhiV1V0WTJFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCCkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEQmhhUHRDbW5lRmd0YnB6ZmxnTGI2THRFblRuQ3hrTThMUHdKZmM5RVIKdFhzTEZYNDVuY1MrdkhiZm5mVnNwN0pRZi9KMDB0UExBenBvQ2lHYTl3TCtUNkV4MWxKemI2ZDVRdlVoV0p5dApQa1Y0NnR3SndTdE9BdnJkNVRQRXR6WjhEL0JmdEtBNmtNU20wRml6NWpWMitBckRRYSs5NzdwbUpsOG9CelNRCnRuS3ZRMkkxY3J1dVJpcW41cEltMjR3ZHIwYS9ETlNISHN4S0RKL0p5dnlrUlE0Ymo2NktuRmlKUElqZ0N5UzAKajNnNTBNZk5DTUltbXRQVFc5N2pETmlwUFIwVSttbSs5cms4eGxsUTdXc1VVZ2thdUxFZzNJM3h6TGtobld1dwpDbzRjUXR1NjE0aVQrSkMvVi9ZS01yU3FMWUp4Wi9ra2RnS2JacmtRd1hTSkFnTUJBQUdqUWpCQU1BNEdBMVVkCkR3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCVFovR2NhRG9HazVLcjEKTElabGJjakNEbndZaHpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQXE5R1l0a0NtMlFBOHE1UnQwTWlHak1WQwpSS2NxWVUzNXUvcmZCOG1udjJ3OHBWeEhFNXM3WGczVTBqZlVLVGp2Qml1cFhkQjB3bWU5R1NFeVdPZXV0WDZVCmZKWXNBd1ZiUlI2Q3U2bHMxMThxQVZRcWxmK0RmSTNsSEhEMlVmY0VmcW1adExCMWJYa3VCM3VEZExnL2lrdjEKUk9lQTV2MlM1cDBwM3oxbDdGQlJ2eHNYTmlVZ0JpdG1SNU1USmhhVEVpTVhpeVV3MXJCN3kyY2JreGFoR3lnbQpmMWJ4eDBKVTFYVmNOR1JlTnhvQVlSSUp2OElhTEFWcWQvSUdGQ0g0ajZZaEt2dWxmY2srSjFvOEthMGdLS0dwCng2eVRQRHExU2w1angxTHZUSzZwbEZvbUFxcW9KS1g0VHZlakRZQXpvbDM2SUR6ajQ4SXRqOHYwUnNRTDNRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrakNDQWVLZ0F3SUJBZ0lRUVFFQW45TkgzY3hEK3lZNjNSMkgvVEFOQmdrcWhraUc5dzBCQVFzRkFEQVgKTVJVd0V3WURWUVFERXd4emIyMWxMVzVoYldVdFkyRXdIaGNOTWpRd05URXdNVEUxTnpFM1doY05NalV3TlRFdwpNVEUxTnpFM1dqQVhNUlV3RXdZRFZRUURFd3h6YjIxbExXNWhiV1V0WTJFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCCkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEQmhhUHRDbW5lRmd0YnB6ZmxnTGI2THRFblRuQ3hrTThMUHdKZmM5RVIKdFhzTEZYNDVuY1MrdkhiZm5mVnNwN0pRZi9KMDB0UExBenBvQ2lHYTl3TCtUNkV4MWxKemI2ZDVRdlVoV0p5dApQa1Y0NnR3SndTdE9BdnJkNVRQRXR6WjhEL0JmdEtBNmtNU20wRml6NWpWMitBckRRYSs5NzdwbUpsOG9CelNRCnRuS3ZRMkkxY3J1dVJpcW41cEltMjR3ZHIwYS9ETlNISHN4S0RKL0p5dnlrUlE0Ymo2NktuRmlKUElqZ0N5UzAKajNnNTBNZk5DTUltbXRQVFc5N2pETmlwUFIwVSttbSs5cms4eGxsUTdXc1VVZ2thdUxFZzNJM3h6TGtobld1dwpDbzRjUXR1NjE0aVQrSkMvVi9ZS01yU3FMWUp4Wi9ra2RnS2JacmtRd1hTSkFnTUJBQUdqUWpCQU1BNEdBMVVkCkR3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCVFovR2NhRG9HazVLcjEKTElabGJjakNEbndZaHpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQXE5R1l0a0NtMlFBOHE1UnQwTWlHak1WQwpSS2NxWVUzNXUvcmZCOG1udjJ3OHBWeEhFNXM3WGczVTBqZlVLVGp2Qml1cFhkQjB3bWU5R1NFeVdPZXV0WDZVCmZKWXNBd1ZiUlI2Q3U2bHMxMThxQVZRcWxmK0RmSTNsSEhEMlVmY0VmcW1adExCMWJYa3VCM3VEZExnL2lrdjEKUk9lQTV2MlM1cDBwM3oxbDdGQlJ2eHNYTmlVZ0JpdG1SNU1USmhhVEVpTVhpeVV3MXJCN3kyY2JreGFoR3lnbQpmMWJ4eDBKVTFYVmNOR1JlTnhvQVlSSUp2OElhTEFWcWQvSUdGQ0g0ajZZaEt2dWxmY2srSjFvOEthMGdLS0dwCng2eVRQRHExU2w1angxTHZUSzZwbEZvbUFxcW9KS1g0VHZlakRZQXpvbDM2SUR6ajQ4SXRqOHYwUnNRTDNRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' ']' + check_secret_data_key some-name-ssl tls.crt + local secret_name=some-name-ssl + local data_key=tls.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json +++ mktemp ++ jq '.data["tls.crt"]' ++ local LAST_OUT=/tmp/tmp.y0UYT967ix +++ mktemp ++ local LAST_ERR=/tmp/tmp.4VOHo87EsY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y0UYT967ix ++ cat /tmp/tmp.4VOHo87EsY ++ rm /tmp/tmp.y0UYT967ix /tmp/tmp.4VOHo87EsY ++ 
return 0 + secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg2RENDQnRDZ0F3SUJBZ0lSQUp4c21LeHZOaHJCWjdSa3pjQjNsOWt3RFFZSktvWklodmNOQVFFTEJRQXcKRnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNQjRYRFRJME1EVXhNREV4TlRjeE9Gb1hEVEkwTURndwpPREV4TlRjeE9Gb3dKREVPTUF3R0ExVUVDaE1GVUZOTlJFSXhFakFRQmdOVkJBTVRDWE52YldVdGJtRnRaVENDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTlhscXBsb1dHSWx2OHQ5MW1aT3FtR3EKL0VqZDA4OCtLRGRQTStUVysxdTFsWHBLc1NwVkJDN2FodFpTdkxLazZkUVlmalhoMzVDNzNLbWE2RVk3RW9jQgpkR0VCUUp0U2hXSTZzemR6dktOa3h1YzhJU1VUZW1VaGpnenY0Vk9xV0RHdFpRMk1xMitYS2hia2RCTkd0ZVhRCmlEQm9SUEJkQWJsaTlaKzVrWVpXaWd1NlNSTnJvZGN4UDNUWngvb3ZHaTFCQmxSMms3M3hhNWV6U1Y3bXRvOVcKdlUxRm9nMVZjTFUyYlM3VGdqNXdYMll5bWZiYS9tVi9wcENXK3NaY3U0ZmJHcHRPa0RpUHYxdGVCT3Z2cm5NZwo1dnZaQ3hXbDZMR1pYNTQyeGJLcE5kejNtemlibysreC8vMWtZSGxHUFl6SVN0NkV6SVlkNzg4S3VBY3MzbThDCkF3RUFBYU9DQlNBd2dnVWNNQTRHQTFVZER3RUIvd1FFQXdJRm9EQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWQKSXdRWU1CYUFGTm44WnhvT2dhVGtxdlVzaG1WdHlNSU9mQmlITUlJRTJRWURWUjBSQklJRTBEQ0NCTXlDQ1d4dgpZMkZzYUc5emRJSU5jMjl0WlMxdVlXMWxMWEp6TUlJcWMyOXRaUzF1WVcxbExYSnpNQzUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEl3TkRVeWdqeHpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjAKTFcxaGJtRm5aWEl0TWpBME5USXVjM1pqTG1Oc2RYTjBaWEl1Ykc5allXeUNEeW91YzI5dFpTMXVZVzFsTFhKegpNSUlzS2k1emIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1qQTBOVEtDClBpb3VjMjl0WlMxdVlXMWxMWEp6TUM1MGJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEl3TkRVeUxuTjIKWXk1amJIVnpkR1Z5TG14dlkyRnNnajl6YjIxbExXNWhiV1V0Y25Nd0xuUnNjeTFwYzNOMVpTMWpaWEowTFcxaApibUZuWlhJdE1qQTBOVEl1YzNaakxtTnNkWE4wWlhKelpYUXViRzlqWVd5Q1FTb3VjMjl0WlMxdVlXMWxMWEp6Ck1DNTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRJd05EVXlMbk4yWXk1amJIVnpkR1Z5YzJWMExteHYKWTJGc2dqTXFMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1qQTBOVEl1YzNaakxtTnNkWE4wWlhKegpaWFF1Ykc5allXeUNFSE52YldVdGJtRnRaUzF0YjI1bmIzT0NMWE52YldVdGJtRnRaUzF0YjI1bmIzTXVkR3h6CkxXbHpjM1ZsTFdObGNuUXRiV0Z1WVdkbGNpMHlNRFExTW9JL2MyOXRaUzF1WVcxbExXMXZibWR2Y3k1MGJITXQKYVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRJd05EVXlMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNnaElxTG5OdgpiV1V0Ym1GdFpTMXRiMjVuYjNPQ0x5b3VjMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTBiSE10YVhOemRXVXRZMlZ5CmRDMXRZVzVoWjJWeUxUSXdORFV5Z2tFcUxuTnZiV1V0Ym1GdFpTMXRiMjVuYjNNdWRHeHpMV2x6YzNWbExXTmwKY25RdGJXRnVZV2RsY2kweU1EUTFNaTV6ZG1NdVkyeDFjM1JsY2k1c2IyTmhiSUlOYzI5dFpTMXVZVzFsTFdObQpaNElxYzI5dFpTMXVZVzFsTFdObVp5NTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRJd05EVXlnanh6CmIyMWxMVzVoYldVdFkyWm5MblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1qQTBOVEl1YzNaakxtTnMKZFhOMFpYSXViRzlqWVd5Q0R5b3VjMjl0WlMxdVlXMWxMV05tWjRJc0tpNXpiMjFsTFc1aGJXVXRZMlpuTG5ScwpjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TWpBME5US0NQaW91YzI5dFpTMXVZVzFsTFdObVp5NTBiSE10CmFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxUSXdORFV5TG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2tKemIyMWwKTFc1aGJXVXRiVzl1WjI5ekxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TWpBME5USXVjM1pqTG1OcwpkWE4wWlhKelpYUXViRzlqWVd5Q1JDb3VjMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTBiSE10YVhOemRXVXRZMlZ5CmRDMXRZVzVoWjJWeUxUSXdORFV5TG5OMll5NWpiSFZ6ZEdWeWMyVjBMbXh2WTJGc2dqOXpiMjFsTFc1aGJXVXQKWTJabkxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TWpBME5USXVjM1pqTG1Oc2RYTjBaWEp6WlhRdQpiRzlqWVd5Q1FTb3VjMjl0WlMxdVlXMWxMV05tWnk1MGJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEl3Ck5EVXlMbk4yWXk1amJIVnpkR1Z5YzJWMExteHZZMkZzTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBN3p6aEMKcUZLVjgwZ3BjODNIMmpFTjZmUmtvNmdDN0ZjNHgvYUxmUTBaZVdxVzhKTEdiVkpYYVExQkc3SjFsK0U3VkZGZwpUZW5ZY1ZQemVLMDFFL1kzQ2ZwV2dNVGRLTUNxQTVTODdFblNMV1FWMkZpaEd6eS96dURvRzlUUDFHeWxSWmxWCmdlU2FMWlV2d2dvSVRDVGhvcWhEZ1BDZWJIUDVkN1R5c1Joc3RLWDg2enNrMU9RMlZLbi94a3c3REgxOFp6WFUKTjlnaUxqL0dabnNzdUk5QVpMUF
p0TTFHN3JCbi9CSUhuYkF4Q09rV2cyeE1QcXVCbGpneW5mRDVtanVITTZDVwpoODFkSjBSK0xIQ2tFd0JYV2w5bm8zQ1JjYW1nVXJZNGFDVzh1RjhKeDl4QkxYNUZuTnhxS1RXbVRCM1lMbFJvCjZJc1hjOFZhMFVMK2wyajYKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg2RENDQnRDZ0F3SUJBZ0lSQUp4c21LeHZOaHJCWjdSa3pjQjNsOWt3RFFZSktvWklodmNOQVFFTEJRQXcKRnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNQjRYRFRJME1EVXhNREV4TlRjeE9Gb1hEVEkwTURndwpPREV4TlRjeE9Gb3dKREVPTUF3R0ExVUVDaE1GVUZOTlJFSXhFakFRQmdOVkJBTVRDWE52YldVdGJtRnRaVENDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTlhscXBsb1dHSWx2OHQ5MW1aT3FtR3EKL0VqZDA4OCtLRGRQTStUVysxdTFsWHBLc1NwVkJDN2FodFpTdkxLazZkUVlmalhoMzVDNzNLbWE2RVk3RW9jQgpkR0VCUUp0U2hXSTZzemR6dktOa3h1YzhJU1VUZW1VaGpnenY0Vk9xV0RHdFpRMk1xMitYS2hia2RCTkd0ZVhRCmlEQm9SUEJkQWJsaTlaKzVrWVpXaWd1NlNSTnJvZGN4UDNUWngvb3ZHaTFCQmxSMms3M3hhNWV6U1Y3bXRvOVcKdlUxRm9nMVZjTFUyYlM3VGdqNXdYMll5bWZiYS9tVi9wcENXK3NaY3U0ZmJHcHRPa0RpUHYxdGVCT3Z2cm5NZwo1dnZaQ3hXbDZMR1pYNTQyeGJLcE5kejNtemlibysreC8vMWtZSGxHUFl6SVN0NkV6SVlkNzg4S3VBY3MzbThDCkF3RUFBYU9DQlNBd2dnVWNNQTRHQTFVZER3RUIvd1FFQXdJRm9EQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWQKSXdRWU1CYUFGTm44WnhvT2dhVGtxdlVzaG1WdHlNSU9mQmlITUlJRTJRWURWUjBSQklJRTBEQ0NCTXlDQ1d4dgpZMkZzYUc5emRJSU5jMjl0WlMxdVlXMWxMWEp6TUlJcWMyOXRaUzF1WVcxbExYSnpNQzUwYkhNdGFYTnpkV1V0ClkyVnlkQzF0WVc1aFoyVnlMVEl3TkRVeWdqeHpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjAKTFcxaGJtRm5aWEl0TWpBME5USXVjM1pqTG1Oc2RYTjBaWEl1Ykc5allXeUNEeW91YzI5dFpTMXVZVzFsTFhKegpNSUlzS2k1emIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1qQTBOVEtDClBpb3VjMjl0WlMxdVlXMWxMWEp6TUM1MGJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEl3TkRVeUxuTjIKWXk1amJIVnpkR1Z5TG14dlkyRnNnajl6YjIxbExXNWhiV1V0Y25Nd0xuUnNjeTFwYzNOMVpTMWpaWEowTFcxaApibUZuWlhJdE1qQTBOVEl1YzNaakxtTnNkWE4wWlhKelpYUXViRzlqWVd5Q1FTb3VjMjl0WlMxdVlXMWxMWEp6Ck1DNTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRJd05EVXlMbk4yWXk1amJIVnpkR1Z5YzJWMExteHYKWTJGc2dqTXFMblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1qQTBOVEl1YzNaakxtTnNkWE4wWlhKegpaWFF1Ykc5allXeUNFSE52YldVdGJtRnRaUzF0YjI1bmIzT0NMWE52YldVdGJtRnRaUzF0YjI1bmIzTXVkR3h6CkxXbHpjM1ZsTFdObGNuUXRiV0Z1WVdkbGNpMHlNRFExTW9JL2MyOXRaUzF1WVcxbExXMXZibWR2Y3k1MGJITXQKYVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRJd05EVXlMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNnaElxTG5OdgpiV1V0Ym1GdFpTMXRiMjVuYjNPQ0x5b3VjMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTBiSE10YVhOemRXVXRZMlZ5CmRDMXRZVzVoWjJWeUxUSXdORFV5Z2tFcUxuTnZiV1V0Ym1GdFpTMXRiMjVuYjNNdWRHeHpMV2x6YzNWbExXTmwKY25RdGJXRnVZV2RsY2kweU1EUTFNaTV6ZG1NdVkyeDFjM1JsY2k1c2IyTmhiSUlOYzI5dFpTMXVZVzFsTFdObQpaNElxYzI5dFpTMXVZVzFsTFdObVp5NTBiSE10YVhOemRXVXRZMlZ5ZEMxdFlXNWhaMlZ5TFRJd05EVXlnanh6CmIyMWxMVzVoYldVdFkyWm5MblJzY3kxcGMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1qQTBOVEl1YzNaakxtTnMKZFhOMFpYSXViRzlqWVd5Q0R5b3VjMjl0WlMxdVlXMWxMV05tWjRJc0tpNXpiMjFsTFc1aGJXVXRZMlpuTG5ScwpjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TWpBME5US0NQaW91YzI5dFpTMXVZVzFsTFdObVp5NTBiSE10CmFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxUSXdORFV5TG5OMll5NWpiSFZ6ZEdWeUxteHZZMkZzZ2tKemIyMWwKTFc1aGJXVXRiVzl1WjI5ekxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TWpBME5USXVjM1pqTG1OcwpkWE4wWlhKelpYUXViRzlqWVd5Q1JDb3VjMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTBiSE10YVhOemRXVXRZMlZ5CmRDMXRZVzVoWjJWeUxUSXdORFV5TG5OMll5NWpiSFZ6ZEdWeWMyVjBMbXh2WTJGc2dqOXpiMjFsTFc1aGJXVXQKWTJabkxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TWpBME5USXVjM1pqTG1Oc2RYTjBaWEp6WlhRdQpiRzlqWVd5Q1FTb3VjMjl0WlMxdVlXMWxMV05tWnk1MGJITXRhWE56ZFdVdFkyVnlkQzF0WVc1aFoyVnlMVEl3Ck5EVXlMbk4yWXk1amJIVnpkR1Z5YzJWMExteHZZMkZzTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBN3p6aEMKcUZLVjgwZ3BjODNIMmpFTjZmUmtvNmdDN0ZjNHgvYUxmUTBaZVdxVzhKTEdiVkpYYVExQkc3SjFsK0U3VkZGZwpUZW5
ZY1ZQemVLMDFFL1kzQ2ZwV2dNVGRLTUNxQTVTODdFblNMV1FWMkZpaEd6eS96dURvRzlUUDFHeWxSWmxWCmdlU2FMWlV2d2dvSVRDVGhvcWhEZ1BDZWJIUDVkN1R5c1Joc3RLWDg2enNrMU9RMlZLbi94a3c3REgxOFp6WFUKTjlnaUxqL0dabnNzdUk5QVpMUFp0TTFHN3JCbi9CSUhuYkF4Q09rV2cyeE1QcXVCbGpneW5mRDVtanVITTZDVwpoODFkSjBSK0xIQ2tFd0JYV2w5bm8zQ1JjYW1nVXJZNGFDVzh1RjhKeDl4QkxYNUZuTnhxS1RXbVRCM1lMbFJvCjZJc1hjOFZhMFVMK2wyajYKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' ']' + check_secret_data_key some-name-ssl tls.key + local secret_name=some-name-ssl + local data_key=tls.key + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["tls.key"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.l2F4ZphSTL +++ mktemp ++ local LAST_ERR=/tmp/tmp.hxUEaLonC6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.l2F4ZphSTL ++ cat /tmp/tmp.hxUEaLonC6 ++ rm /tmp/tmp.l2F4ZphSTL /tmp/tmp.hxUEaLonC6 ++ return 0 + secret_data='"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBMWVXcW1XaFlZaVcveTMzV1prNnFZYXI4U04zVHp6NG9OMDh6NU5iN1c3V1Zla3F4CktsVUVMdHFHMWxLOHNxVHAxQmgrTmVIZmtMdmNxWnJvUmpzU2h3RjBZUUZBbTFLRllqcXpOM084bzJURzV6d2gKSlJONlpTR09ETy9oVTZwWU1hMWxEWXlyYjVjcUZ1UjBFMGExNWRDSU1HaEU4RjBCdVdMMW43bVJobGFLQzdwSgpFMnVoMXpFL2RObkgraThhTFVFR1ZIYVR2ZkZybDdOSlh1YTJqMWE5VFVXaURWVnd0VFp0THRPQ1BuQmZaaktaCjl0citaWCtta0piNnhseTdoOXNhbTA2UU9JKy9XMTRFNisrdWN5RG0rOWtMRmFYb3NabGZuamJGc3FrMTNQZWIKT0p1ajc3SC8vV1JnZVVZOWpNaEszb1RNaGgzdnp3cTRCeXplYndJREFRQUJBb0lCQVFDQnRWbzAyUi92dExDUAo5K2xSOVJaQlQrelpTeHVzcXlhOEU1MkdnZysvVjJnajFyT0UxalZKTEY5a0RKdzlRT0JESys4dFFhSFBKUHoxCkpPa0IzY1NvZGV0YS8rVnJ5eWp6bm1FYm1XV2xOL0dlTzhwbHIyQWpmNnVpa0x6MWZYRXAvdG91MDBVSzBJSkgKaTVZUU1tc1B4OXN1YjZ5T2pmZ0RzY3o5WlF5b2h5bEd5Q0g1RVZPaWEvTHNIV3M0THBIa3pvN1BmNjhEdkhmVAp6Z3Q1RVRCQkJPNlRFNWpiQ1R1Ty9wYk5kRGRxc1dMc2tHL1RQRGNIbHVxdGl0emt6elY0L3NTbWJjVmdlQnNpCldRTDdhYzhlVUlVQmcyS3BHdnluTjM4aHFZUnJaZ3F0OVB2bU5GVjhmV3NXcUJmN1krbWNNNkM5OU5iWWhlK0QKWkVjSkRPWkJBb0dCQU5mUEMxYXdPUnJIRWNPQ2QvWnRXanJ4eThQeEl6RlUrWEtnbnViTGZBVWFmZW5QalNPdApJdU13c3FVZjdJOXJSeEhJYmt6TzJxZXJ6T0JDYWVHSkFLNWpiVU5kWkx0eU5FTjNuVnJBeE5iellvTExRSHE0ClhnQ3YvMFhsZzEwTmNvRVNMdm1pYlBrK2svZHFnTnNBeFJhdVMyR2Vnc0FGdE1nd2hLbDlYaE9SQW9HQkFQMjcKZTQwaHFtTXNHNVcyYkFhNG9xcUV4ZGgyZXMycVlTUFo2Y0p4MXROTWluL2hOem1nbzEzQ0wybFF6alVXOG0vQgp2WkxybXVpUU04VjNDMWVSK1o5bkhFUnVaeFQwOVhjYmEyVlUzV1hvYnlCRkduTSswOWdSak40RzBkbmJLa1lGCjlUdkFRY21LcEg5QW5CU2E4Ukxmc09OcXFxaGFQdm1yYUhsMW50SC9Bb0dBUjg4Vjk4ZDFpT3A2aDdLdkZpbDIKMzZ3N1lEeXB1QzJOUDA0ZzhhejkzSGpmTXVDa0Z4M3lRT1NnWFArTmFIK0t5MmtGQXJ2RjJBcjdoaGV6UVoragp3V05HSlNjN2NIL2swaHBJR3IyUUFmSkVjajhOZW9oRWh6Z0kyZ292dHRQVnFRc1M1Ry9la09UNHVWZ1Avazk0ClJUN0R5SHpGQnY0ZkZJbklhdGxUWjNFQ2dZRUFuUmYyNXVweEUvVzdIblpUYTlYazRLNVQwREdLMXNzbVdZd1IKcnRITXNBWXp1ZmsydC9jOUxrK01DeWg4ZzJJeHQvKzczN3IvcmYyYUVDZ2tYWk9EZWEzZFVvRUlVWEZZaU9YeApHRXZ5eDZXaEo5bWhCcmxmYitkdDBBbG80L2xOc3d6OVNJRkFPZ2djMEt5djloSDRNRGRPelg2OUtuNjFmdDhoClVCSUN3d0VDZ1lFQWsvVDV0NmhHWWxYUngvdngvMXJYOU5BRi9XR1RtVzRXV1VmU3poSnJlRzR4RG9FSDBHKzQKaWVwd3dnWUhUVG9SdnNxbXc1QnZjdVpwUWo0K0FMWFNHVjNTMnRrayt6a25TamtTemc0cDI4SlpzMlMyWXhCcApWWHBiblJwc3F1Y1lzVk5MUWNIQURJMksxQS9ndElFc1orUHR4TFVPMk4zWlN0aWNqQnVVaVRVPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo="' + '[' -z 
'"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBMWVXcW1XaFlZaVcveTMzV1prNnFZYXI4U04zVHp6NG9OMDh6NU5iN1c3V1Zla3F4CktsVUVMdHFHMWxLOHNxVHAxQmgrTmVIZmtMdmNxWnJvUmpzU2h3RjBZUUZBbTFLRllqcXpOM084bzJURzV6d2gKSlJONlpTR09ETy9oVTZwWU1hMWxEWXlyYjVjcUZ1UjBFMGExNWRDSU1HaEU4RjBCdVdMMW43bVJobGFLQzdwSgpFMnVoMXpFL2RObkgraThhTFVFR1ZIYVR2ZkZybDdOSlh1YTJqMWE5VFVXaURWVnd0VFp0THRPQ1BuQmZaaktaCjl0citaWCtta0piNnhseTdoOXNhbTA2UU9JKy9XMTRFNisrdWN5RG0rOWtMRmFYb3NabGZuamJGc3FrMTNQZWIKT0p1ajc3SC8vV1JnZVVZOWpNaEszb1RNaGgzdnp3cTRCeXplYndJREFRQUJBb0lCQVFDQnRWbzAyUi92dExDUAo5K2xSOVJaQlQrelpTeHVzcXlhOEU1MkdnZysvVjJnajFyT0UxalZKTEY5a0RKdzlRT0JESys4dFFhSFBKUHoxCkpPa0IzY1NvZGV0YS8rVnJ5eWp6bm1FYm1XV2xOL0dlTzhwbHIyQWpmNnVpa0x6MWZYRXAvdG91MDBVSzBJSkgKaTVZUU1tc1B4OXN1YjZ5T2pmZ0RzY3o5WlF5b2h5bEd5Q0g1RVZPaWEvTHNIV3M0THBIa3pvN1BmNjhEdkhmVAp6Z3Q1RVRCQkJPNlRFNWpiQ1R1Ty9wYk5kRGRxc1dMc2tHL1RQRGNIbHVxdGl0emt6elY0L3NTbWJjVmdlQnNpCldRTDdhYzhlVUlVQmcyS3BHdnluTjM4aHFZUnJaZ3F0OVB2bU5GVjhmV3NXcUJmN1krbWNNNkM5OU5iWWhlK0QKWkVjSkRPWkJBb0dCQU5mUEMxYXdPUnJIRWNPQ2QvWnRXanJ4eThQeEl6RlUrWEtnbnViTGZBVWFmZW5QalNPdApJdU13c3FVZjdJOXJSeEhJYmt6TzJxZXJ6T0JDYWVHSkFLNWpiVU5kWkx0eU5FTjNuVnJBeE5iellvTExRSHE0ClhnQ3YvMFhsZzEwTmNvRVNMdm1pYlBrK2svZHFnTnNBeFJhdVMyR2Vnc0FGdE1nd2hLbDlYaE9SQW9HQkFQMjcKZTQwaHFtTXNHNVcyYkFhNG9xcUV4ZGgyZXMycVlTUFo2Y0p4MXROTWluL2hOem1nbzEzQ0wybFF6alVXOG0vQgp2WkxybXVpUU04VjNDMWVSK1o5bkhFUnVaeFQwOVhjYmEyVlUzV1hvYnlCRkduTSswOWdSak40RzBkbmJLa1lGCjlUdkFRY21LcEg5QW5CU2E4Ukxmc09OcXFxaGFQdm1yYUhsMW50SC9Bb0dBUjg4Vjk4ZDFpT3A2aDdLdkZpbDIKMzZ3N1lEeXB1QzJOUDA0ZzhhejkzSGpmTXVDa0Z4M3lRT1NnWFArTmFIK0t5MmtGQXJ2RjJBcjdoaGV6UVoragp3V05HSlNjN2NIL2swaHBJR3IyUUFmSkVjajhOZW9oRWh6Z0kyZ292dHRQVnFRc1M1Ry9la09UNHVWZ1Avazk0ClJUN0R5SHpGQnY0ZkZJbklhdGxUWjNFQ2dZRUFuUmYyNXVweEUvVzdIblpUYTlYazRLNVQwREdLMXNzbVdZd1IKcnRITXNBWXp1ZmsydC9jOUxrK01DeWg4ZzJJeHQvKzczN3IvcmYyYUVDZ2tYWk9EZWEzZFVvRUlVWEZZaU9YeApHRXZ5eDZXaEo5bWhCcmxmYitkdDBBbG80L2xOc3d6OVNJRkFPZ2djMEt5djloSDRNRGRPelg2OUtuNjFmdDhoClVCSUN3d0VDZ1lFQWsvVDV0NmhHWWxYUngvdngvMXJYOU5BRi9XR1RtVzRXV1VmU3poSnJlRzR4RG9FSDBHKzQKaWVwd3dnWUhUVG9SdnNxbXc1QnZjdVpwUWo0K0FMWFNHVjNTMnRrayt6a25TamtTemc0cDI4SlpzMlMyWXhCcApWWHBiblJwc3F1Y1lzVk5MUWNIQURJMksxQS9ndElFc1orUHR4TFVPMk4zWlN0aWNqQnVVaVRVPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo="' ']' + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. 
| select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.Sfsq1Q8B0q ++ mktemp + local LAST_ERR=/tmp/tmp.1QDKfDcofQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Sfsq1Q8B0q + cat /tmp/tmp.1QDKfDcofQ + rm /tmp/tmp.Sfsq1Q8B0q /tmp/tmp.1QDKfDcofQ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + 
kubectl_bin get -o yaml issuer/some-name-psmdb-issuer ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.2fEFbaz5YN ++ mktemp + local LAST_ERR=/tmp/tmp.thoEP4PfyD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2fEFbaz5YN + cat /tmp/tmp.thoEP4PfyD + rm /tmp/tmp.2fEFbaz5YN /tmp/tmp.thoEP4PfyD + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.eRX6cvrk5o ++ mktemp + local LAST_ERR=/tmp/tmp.5sdcELN6it + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eRX6cvrk5o + cat /tmp/tmp.5sdcELN6it + rm /tmp/tmp.eRX6cvrk5o /tmp/tmp.5sdcELN6it + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local new_result=/tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | ++ mktemp (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.d2ExrLwN67 ++ mktemp + local LAST_ERR=/tmp/tmp.v6EAqeCvPu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d2ExrLwN67 + cat /tmp/tmp.v6EAqeCvPu + rm /tmp/tmp.d2ExrLwN67 /tmp/tmp.v6EAqeCvPu + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait 
--for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CyOTjXiAwL +++ mktemp ++ local LAST_ERR=/tmp/tmp.UmisoZ1Y0s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CyOTjXiAwL ++ cat /tmp/tmp.UmisoZ1Y0s ++ rm /tmp/tmp.CyOTjXiAwL /tmp/tmp.UmisoZ1Y0s ++ return 0 + pod_name=cmctl-69659bcd68-99g2c + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uIYS4BRFHc +++ mktemp ++ local LAST_ERR=/tmp/tmp.gOOpIVx0eF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uIYS4BRFHc ++ cat /tmp/tmp.gOOpIVx0eF ++ rm /tmp/tmp.uIYS4BRFHc /tmp/tmp.gOOpIVx0eF ++ return 0 + revision=1 + kubectl_bin exec cmctl-69659bcd68-99g2c -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.NB03BgcKfB ++ mktemp + local LAST_ERR=/tmp/tmp.iFKtOxKmf5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec cmctl-69659bcd68-99g2c -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NB03BgcKfB Manually triggered issuance of Certificate tls-issue-cert-manager-20452/some-name-ssl + cat /tmp/tmp.iFKtOxKmf5 + rm /tmp/tmp.NB03BgcKfB /tmp/tmp.iFKtOxKmf5 + return 0 + for i in '{1..10}' + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rSHGxhsGJq +++ mktemp ++ local LAST_ERR=/tmp/tmp.b1W9C5114i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rSHGxhsGJq ++ cat /tmp/tmp.b1W9C5114i ++ rm /tmp/tmp.rSHGxhsGJq /tmp/tmp.b1W9C5114i ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK 
+ for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dpgBDwBTWN +++ mktemp ++ local LAST_ERR=/tmp/tmp.8NKVdqMuSk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dpgBDwBTWN ++ cat /tmp/tmp.8NKVdqMuSk ++ rm /tmp/tmp.dpgBDwBTWN /tmp/tmp.8NKVdqMuSk ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m5j4lvgIYK +++ mktemp ++ local LAST_ERR=/tmp/tmp.PwkEp5nf6z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m5j4lvgIYK ++ cat /tmp/tmp.PwkEp5nf6z ++ rm /tmp/tmp.m5j4lvgIYK /tmp/tmp.PwkEp5nf6z ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................................................................................................................................................................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UOEY4OvDTm +++ mktemp ++ local LAST_ERR=/tmp/tmp.FSsRzGz5iX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UOEY4OvDTm ++ cat /tmp/tmp.FSsRzGz5iX ++ rm /tmp/tmp.UOEY4OvDTm /tmp/tmp.FSsRzGz5iX ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.26JAmVUEwy +++ mktemp ++ local LAST_ERR=/tmp/tmp.zlnFlL0ykA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.26JAmVUEwy ++ cat 
/tmp/tmp.zlnFlL0ykA ++ rm /tmp/tmp.26JAmVUEwy /tmp/tmp.zlnFlL0ykA ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XasO8wXEei +++ mktemp ++ local LAST_ERR=/tmp/tmp.4tNQEyq4Km ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XasO8wXEei ++ cat /tmp/tmp.4tNQEyq4Km ++ rm /tmp/tmp.XasO8wXEei /tmp/tmp.4tNQEyq4Km ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XuQt4wMZcZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.qV4aIlsbFd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XuQt4wMZcZ ++ cat /tmp/tmp.qV4aIlsbFd ++ rm /tmp/tmp.XuQt4wMZcZ /tmp/tmp.qV4aIlsbFd ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s 
certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pf4OhnksMm +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ee4p2hXUwW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pf4OhnksMm ++ cat /tmp/tmp.Ee4p2hXUwW ++ rm /tmp/tmp.pf4OhnksMm /tmp/tmp.Ee4p2hXUwW ++ return 0 + pod_name=cmctl-69659bcd68-99g2c + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k1OV8xwm50 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CMjJ6RLtb2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k1OV8xwm50 ++ cat /tmp/tmp.CMjJ6RLtb2 ++ rm /tmp/tmp.k1OV8xwm50 /tmp/tmp.CMjJ6RLtb2 ++ return 0 + revision=1 + kubectl_bin exec cmctl-69659bcd68-99g2c -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.KwppUQDfkY ++ mktemp + local LAST_ERR=/tmp/tmp.RrIcbOcpKz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec cmctl-69659bcd68-99g2c -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KwppUQDfkY Manually triggered issuance of Certificate tls-issue-cert-manager-20452/some-name-ssl-internal + cat /tmp/tmp.RrIcbOcpKz + rm /tmp/tmp.KwppUQDfkY /tmp/tmp.RrIcbOcpKz + return 0 + for i in '{1..10}' + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ixqlfjf1VH +++ mktemp ++ local LAST_ERR=/tmp/tmp.b18UKAwZT1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ixqlfjf1VH ++ cat /tmp/tmp.b18UKAwZT1 ++ rm /tmp/tmp.ixqlfjf1VH /tmp/tmp.b18UKAwZT1 ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local 
pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WsCd0HUNYT +++ mktemp ++ local LAST_ERR=/tmp/tmp.N2ZVym0K96 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WsCd0HUNYT ++ cat /tmp/tmp.N2ZVym0K96 ++ rm /tmp/tmp.WsCd0HUNYT /tmp/tmp.N2ZVym0K96 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kZjrK1EzFR +++ mktemp ++ local LAST_ERR=/tmp/tmp.4WyF4yqAqK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kZjrK1EzFR ++ cat /tmp/tmp.4WyF4yqAqK ++ rm /tmp/tmp.kZjrK1EzFR /tmp/tmp.4WyF4yqAqK ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................................................................................................................................................... 
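The renewal checks traced above follow one pattern for both certificates: wait for the Certificate to be Ready, record its .status.revision, trigger re-issuance through the cmctl pod, then poll until the revision increases. Below is a condensed, standalone sketch of that pattern; the helper name renew_and_verify is an illustrative stand-in for the suite's own renew_certificate/wait_certificate helpers, and it assumes (as the trace shows) a pod labelled name=cmctl with the cmctl binary at /tmp/cmctl.

#!/usr/bin/env bash
# Sketch only: trigger a cert-manager renewal and confirm the certificate was re-issued.
renew_and_verify() {
    local certificate="$1"
    local pod revision new_revision

    # The Certificate must be Ready before a manual renewal makes sense.
    kubectl wait --for=condition=Ready "certificate/${certificate}" --timeout=60s

    pod=$(kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}')
    revision=$(kubectl get certificate "${certificate}" -o 'jsonpath={.status.revision}')

    # cmctl asks cert-manager to issue a new revision of the certificate.
    kubectl exec "${pod}" -- /tmp/cmctl renew "${certificate}"

    # Issuance is asynchronous, so poll .status.revision until it moves.
    for _ in $(seq 1 10); do
        new_revision=$(kubectl get certificate "${certificate}" -o 'jsonpath={.status.revision}')
        if [[ "${new_revision}" -gt "${revision}" ]]; then
            echo "certificate ${certificate} re-issued: revision ${revision} -> ${new_revision}"
            return 0
        fi
        sleep 5
    done

    echo "certificate ${certificate} was not re-issued" >&2
    return 1
}

# Example (hypothetical): renew_and_verify some-name-ssl-internal, then re-run the pod readiness checks.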
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IYKScvQ4ef +++ mktemp ++ local LAST_ERR=/tmp/tmp.pBKNVPbrrA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IYKScvQ4ef ++ cat /tmp/tmp.pBKNVPbrrA ++ rm /tmp/tmp.IYKScvQ4ef /tmp/tmp.pBKNVPbrrA ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TFROAOHMnf +++ mktemp ++ local LAST_ERR=/tmp/tmp.kN4dxDXcOH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TFROAOHMnf ++ cat /tmp/tmp.kN4dxDXcOH ++ rm /tmp/tmp.TFROAOHMnf /tmp/tmp.kN4dxDXcOH ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RjWwMDiENY +++ mktemp ++ local LAST_ERR=/tmp/tmp.NCxFozWyc4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RjWwMDiENY ++ cat /tmp/tmp.NCxFozWyc4 ++ rm /tmp/tmp.RjWwMDiENY /tmp/tmp.NCxFozWyc4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MyqfiBihRH +++ mktemp ++ local LAST_ERR=/tmp/tmp.IhB4L5pGdh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MyqfiBihRH ++ cat /tmp/tmp.IhB4L5pGdh ++ rm /tmp/tmp.MyqfiBihRH /tmp/tmp.IhB4L5pGdh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.m11SOFe5YP ++ mktemp + local LAST_ERR=/tmp/tmp.LjVbxVRseW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m11SOFe5YP + cat /tmp/tmp.LjVbxVRseW + rm /tmp/tmp.m11SOFe5YP /tmp/tmp.LjVbxVRseW + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-ca-issuer.yml + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.CI3QtYxrmD ++ mktemp + local LAST_ERR=/tmp/tmp.YKqnIiUzWz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CI3QtYxrmD + cat /tmp/tmp.YKqnIiUzWz + rm /tmp/tmp.CI3QtYxrmD /tmp/tmp.YKqnIiUzWz + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.J94EElvf6a/issuer_some-name-psmdb-issuer.yml + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.bvb2RJqcE3 ++ mktemp + local LAST_ERR=/tmp/tmp.1BY763Yhei + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bvb2RJqcE3 + cat /tmp/tmp.1BY763Yhei + rm /tmp/tmp.bvb2RJqcE3 /tmp/tmp.1BY763Yhei + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.J94EElvf6a/certificate_some-name-ssl.yml + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local new_result=/tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.iPD0TgSBCv ++ mktemp + local LAST_ERR=/tmp/tmp.GKYaprLGOM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iPD0TgSBCv + cat /tmp/tmp.GKYaprLGOM + rm /tmp/tmp.iPD0TgSBCv /tmp/tmp.GKYaprLGOM + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.J94EElvf6a/certificate_some-name-ssl-internal.yml + desc 'disable TLS' + set +o xtrace ----------------------------------------------------------------------------------- disable TLS ----------------------------------------------------------------------------------- + pause_cluster some-name + local cluster_name=some-name + echo 'Pausing cluster some-name' Pausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' ++ mktemp + local LAST_OUT=/tmp/tmp.0MBtiXptoZ ++ mktemp + local LAST_ERR=/tmp/tmp.lt8ANEYKH6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0MBtiXptoZ perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.lt8ANEYKH6 + rm /tmp/tmp.0MBtiXptoZ /tmp/tmp.lt8ANEYKH6 + return 0 + wait_for_cluster_state some-name paused + local cluster_name=some-name + local target_state=paused + echo -n 'Waiting for cluster to reach paused state' Waiting for cluster to reach paused state+ local timeout=0 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8gGhyAclEz +++ mktemp ++ local LAST_ERR=/tmp/tmp.aBkizMDv1w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8gGhyAclEz ++ cat /tmp/tmp.aBkizMDv1w ++ rm /tmp/tmp.8gGhyAclEz /tmp/tmp.aBkizMDv1w ++ return 0 + [[ ready == paused ]] + sleep 1 + timeout=1 + echo -n . .+ [[ 1 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tcRaqSGiWp +++ mktemp ++ local LAST_ERR=/tmp/tmp.ibT39WXv9e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tcRaqSGiWp ++ cat /tmp/tmp.ibT39WXv9e ++ rm /tmp/tmp.tcRaqSGiWp /tmp/tmp.ibT39WXv9e ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=2 + echo -n . 
.
[... the same check repeats roughly once per second: kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' keeps returning "stopping" while the pods shut down, and the timeout counter climbs from 2 to 72, well under the 1500 limit ...]
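wait_for_cluster_state itself is just a poll-and-sleep loop over .status.state. Its shape can be read off the trace, though the exact behaviour once the counter exceeds 1500 is an assumption:

    # Reconstruction of the polling loop seen above; the failure action after
    # 1500 iterations is assumed, not copied from the harness source.
    wait_for_cluster_state() {
        local cluster_name="$1"
        local target_state="$2"
        local timeout=0

        echo -n "Waiting for cluster to reach ${target_state} state"
        until [[ "$(kubectl get psmdb "${cluster_name}" -o 'jsonpath={.status.state}')" == "${target_state}" ]]; do
            sleep 1
            timeout=$((timeout + 1))
            echo -n .
            if [[ ${timeout} -gt 1500 ]]; then
                echo "cluster ${cluster_name} never reached state ${target_state}"
                return 1
            fi
        done
        echo
    }

In the run above the paused state is reached after 72 iterations, i.e. a bit over a minute of waiting.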
.+ [[ 72 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RyCONOumI9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iSH0lCAucp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RyCONOumI9 ++ cat /tmp/tmp.iSH0lCAucp ++ rm /tmp/tmp.RyCONOumI9 /tmp/tmp.iSH0lCAucp ++ return 0 + [[ paused == paused ]] + echo + disable_tls some-name + local cluster_name=some-name + echo 'Disabling TLS for cluster some-name' Disabling TLS for cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' ++ mktemp + local LAST_OUT=/tmp/tmp.BN2xeQ6w2l ++ mktemp + local LAST_ERR=/tmp/tmp.PcHQOvWwv2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BN2xeQ6w2l perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.PcHQOvWwv2 + rm /tmp/tmp.BN2xeQ6w2l /tmp/tmp.PcHQOvWwv2 + return 0 + unpause_cluster some-name + local cluster_name=some-name + echo 'Unpausing cluster some-name' Unpausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' ++ mktemp + local LAST_OUT=/tmp/tmp.uK6HkSO5Tz ++ mktemp + local LAST_ERR=/tmp/tmp.lnDJV9guxO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uK6HkSO5Tz perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.lnDJV9guxO + rm /tmp/tmp.uK6HkSO5Tz /tmp/tmp.lnDJV9guxO + return 0 + wait_for_cluster_state some-name ready + local cluster_name=some-name + local target_state=ready + echo -n 'Waiting for cluster to reach ready state' Waiting for cluster to reach ready state+ local timeout=0 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wzqOXAoETd +++ mktemp ++ local LAST_ERR=/tmp/tmp.dsG2SyfSAm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wzqOXAoETd ++ cat /tmp/tmp.dsG2SyfSAm ++ rm /tmp/tmp.wzqOXAoETd /tmp/tmp.dsG2SyfSAm ++ return 0 + [[ paused == ready ]] + sleep 1 + timeout=1 + echo -n . .+ [[ 1 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WNpx0I3KHQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7HwMCNNhLT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WNpx0I3KHQ ++ cat /tmp/tmp.7HwMCNNhLT ++ rm /tmp/tmp.WNpx0I3KHQ /tmp/tmp.7HwMCNNhLT ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=2 + echo -n . 
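Condensed, the disable-TLS sequence the trace just performed is: pause the cluster, wait for the paused state, switch spec.tls.mode to disabled (which also requires the spec.unsafeFlags.tls escape hatch), then unpause and wait for ready again. Replayed by hand against the same cluster, the three merge patches are the ones shown in the trace:

    kubectl patch psmdb some-name --type merge -p '{"spec": {"pause": true}}'
    # ...wait until .status.state is "paused"...
    kubectl patch psmdb some-name --type merge \
        -p '{"spec": {"unsafeFlags": {"tls": true}, "tls": {"mode": "disabled"}}}'
    kubectl patch psmdb some-name --type merge -p '{"spec": {"pause": false}}'
    # ...wait until .status.state is "ready"...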
.
[... the same polling pattern repeats after unpausing: the state stays "initializing" while the pods come back up, and the timeout counter climbs from 2 to 26 ...]
.+ [[ 26 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EkkdUAuDM5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gYHYZaxAEO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EkkdUAuDM5 ++ cat /tmp/tmp.gYHYZaxAEO ++ rm /tmp/tmp.EkkdUAuDM5 /tmp/tmp.gYHYZaxAEO ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=27 + echo -n . .+ [[ 27 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7mCXfR08Cu +++ mktemp ++ local LAST_ERR=/tmp/tmp.vxyo5TzEG8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7mCXfR08Cu ++ cat /tmp/tmp.vxyo5TzEG8 ++ rm /tmp/tmp.7mCXfR08Cu /tmp/tmp.vxyo5TzEG8 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=28 + echo -n . .+ [[ 28 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JHhhYC5jNL +++ mktemp ++ local LAST_ERR=/tmp/tmp.q4V6RqZZ5N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JHhhYC5jNL ++ cat /tmp/tmp.q4V6RqZZ5N ++ rm /tmp/tmp.JHhhYC5jNL /tmp/tmp.q4V6RqZZ5N ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=29 + echo -n . .+ [[ 29 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KrHr5Lq0no +++ mktemp ++ local LAST_ERR=/tmp/tmp.su2R0sCCvg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KrHr5Lq0no ++ cat /tmp/tmp.su2R0sCCvg ++ rm /tmp/tmp.KrHr5Lq0no /tmp/tmp.su2R0sCCvg ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=30 + echo -n . .+ [[ 30 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LrTtaISSyO +++ mktemp ++ local LAST_ERR=/tmp/tmp.fcw8mFqeaQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LrTtaISSyO ++ cat /tmp/tmp.fcw8mFqeaQ ++ rm /tmp/tmp.LrTtaISSyO /tmp/tmp.fcw8mFqeaQ ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=31 + echo -n . .+ [[ 31 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IK5oSMslEN +++ mktemp ++ local LAST_ERR=/tmp/tmp.EM8B0xdRCk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IK5oSMslEN ++ cat /tmp/tmp.EM8B0xdRCk ++ rm /tmp/tmp.IK5oSMslEN /tmp/tmp.EM8B0xdRCk ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=32 + echo -n . 
.+ [[ 32 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8pZyAV7X0S +++ mktemp ++ local LAST_ERR=/tmp/tmp.CxiwJfFFvJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8pZyAV7X0S ++ cat /tmp/tmp.CxiwJfFFvJ ++ rm /tmp/tmp.8pZyAV7X0S /tmp/tmp.CxiwJfFFvJ ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=33 + echo -n . .+ [[ 33 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mTvLvkL4m4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yqi9tOxGTY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mTvLvkL4m4 ++ cat /tmp/tmp.Yqi9tOxGTY ++ rm /tmp/tmp.mTvLvkL4m4 /tmp/tmp.Yqi9tOxGTY ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=34 + echo -n . .+ [[ 34 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nsel57DMRY +++ mktemp ++ local LAST_ERR=/tmp/tmp.dEBPJa3gak ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nsel57DMRY ++ cat /tmp/tmp.dEBPJa3gak ++ rm /tmp/tmp.nsel57DMRY /tmp/tmp.dEBPJa3gak ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=35 + echo -n . .+ [[ 35 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kSr8UyWa72 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2dioqaUguX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kSr8UyWa72 ++ cat /tmp/tmp.2dioqaUguX ++ rm /tmp/tmp.kSr8UyWa72 /tmp/tmp.2dioqaUguX ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=36 + echo -n . .+ [[ 36 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W1GJpy4kdk +++ mktemp ++ local LAST_ERR=/tmp/tmp.erELDlHEwM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W1GJpy4kdk ++ cat /tmp/tmp.erELDlHEwM ++ rm /tmp/tmp.W1GJpy4kdk /tmp/tmp.erELDlHEwM ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=37 + echo -n . .+ [[ 37 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0fbu5ei1Nn +++ mktemp ++ local LAST_ERR=/tmp/tmp.tmJWZFQkAn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0fbu5ei1Nn ++ cat /tmp/tmp.tmJWZFQkAn ++ rm /tmp/tmp.0fbu5ei1Nn /tmp/tmp.tmJWZFQkAn ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=38 + echo -n . 
.+ [[ 38 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FOgw1rzDda +++ mktemp ++ local LAST_ERR=/tmp/tmp.oljWZ0l5DY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FOgw1rzDda ++ cat /tmp/tmp.oljWZ0l5DY ++ rm /tmp/tmp.FOgw1rzDda /tmp/tmp.oljWZ0l5DY ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=39 + echo -n . .+ [[ 39 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lybMNPH737 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bf5DtJzamG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lybMNPH737 ++ cat /tmp/tmp.Bf5DtJzamG ++ rm /tmp/tmp.lybMNPH737 /tmp/tmp.Bf5DtJzamG ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=40 + echo -n . .+ [[ 40 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tY522Md4g9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BcTACtUDJp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tY522Md4g9 ++ cat /tmp/tmp.BcTACtUDJp ++ rm /tmp/tmp.tY522Md4g9 /tmp/tmp.BcTACtUDJp ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=41 + echo -n . .+ [[ 41 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PyNCzdKRmW +++ mktemp ++ local LAST_ERR=/tmp/tmp.YiuGFasT5a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PyNCzdKRmW ++ cat /tmp/tmp.YiuGFasT5a ++ rm /tmp/tmp.PyNCzdKRmW /tmp/tmp.YiuGFasT5a ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=42 + echo -n . .+ [[ 42 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q2eYsSUf5i +++ mktemp ++ local LAST_ERR=/tmp/tmp.ME0zQrk4Al ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q2eYsSUf5i ++ cat /tmp/tmp.ME0zQrk4Al ++ rm /tmp/tmp.q2eYsSUf5i /tmp/tmp.ME0zQrk4Al ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=43 + echo -n . .+ [[ 43 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.811mcVhEPD +++ mktemp ++ local LAST_ERR=/tmp/tmp.D8ilNkkaCP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.811mcVhEPD ++ cat /tmp/tmp.D8ilNkkaCP ++ rm /tmp/tmp.811mcVhEPD /tmp/tmp.D8ilNkkaCP ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=44 + echo -n . 
.+ [[ 44 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d0LYxRrL3e +++ mktemp ++ local LAST_ERR=/tmp/tmp.EvMWX5tXPd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d0LYxRrL3e ++ cat /tmp/tmp.EvMWX5tXPd ++ rm /tmp/tmp.d0LYxRrL3e /tmp/tmp.EvMWX5tXPd ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=45 + echo -n . .+ [[ 45 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ySS38H8AUo +++ mktemp ++ local LAST_ERR=/tmp/tmp.LySMjOw4Uy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ySS38H8AUo ++ cat /tmp/tmp.LySMjOw4Uy ++ rm /tmp/tmp.ySS38H8AUo /tmp/tmp.LySMjOw4Uy ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=46 + echo -n . .+ [[ 46 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j66rrCnaGG +++ mktemp ++ local LAST_ERR=/tmp/tmp.7k1OtRmtLN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j66rrCnaGG ++ cat /tmp/tmp.7k1OtRmtLN ++ rm /tmp/tmp.j66rrCnaGG /tmp/tmp.7k1OtRmtLN ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=47 + echo -n . .+ [[ 47 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cb9jcjbiEW +++ mktemp ++ local LAST_ERR=/tmp/tmp.C9TQExDsw6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cb9jcjbiEW ++ cat /tmp/tmp.C9TQExDsw6 ++ rm /tmp/tmp.cb9jcjbiEW /tmp/tmp.C9TQExDsw6 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=48 + echo -n . .+ [[ 48 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2vAInEHmFW +++ mktemp ++ local LAST_ERR=/tmp/tmp.5oqPZrUyq3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2vAInEHmFW ++ cat /tmp/tmp.5oqPZrUyq3 ++ rm /tmp/tmp.2vAInEHmFW /tmp/tmp.5oqPZrUyq3 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=49 + echo -n . .+ [[ 49 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PWr1MHWwZM +++ mktemp ++ local LAST_ERR=/tmp/tmp.mPvPx160pH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PWr1MHWwZM ++ cat /tmp/tmp.mPvPx160pH ++ rm /tmp/tmp.PWr1MHWwZM /tmp/tmp.mPvPx160pH ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=50 + echo -n . 
.+ [[ 50 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.17jRBRffT4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9wNAxYDnow ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.17jRBRffT4 ++ cat /tmp/tmp.9wNAxYDnow ++ rm /tmp/tmp.17jRBRffT4 /tmp/tmp.9wNAxYDnow ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=51 + echo -n . .+ [[ 51 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cnWxxMTxs5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.C4Hlu5HtJe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cnWxxMTxs5 ++ cat /tmp/tmp.C4Hlu5HtJe ++ rm /tmp/tmp.cnWxxMTxs5 /tmp/tmp.C4Hlu5HtJe ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=52 + echo -n . .+ [[ 52 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hF6qHDD9qy +++ mktemp ++ local LAST_ERR=/tmp/tmp.8cN2UiGsFA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hF6qHDD9qy ++ cat /tmp/tmp.8cN2UiGsFA ++ rm /tmp/tmp.hF6qHDD9qy /tmp/tmp.8cN2UiGsFA ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=53 + echo -n . .+ [[ 53 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SWSIskqHiF +++ mktemp ++ local LAST_ERR=/tmp/tmp.9TgF7JNAoB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SWSIskqHiF ++ cat /tmp/tmp.9TgF7JNAoB ++ rm /tmp/tmp.SWSIskqHiF /tmp/tmp.9TgF7JNAoB ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=54 + echo -n . .+ [[ 54 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UnF5fqbncY +++ mktemp ++ local LAST_ERR=/tmp/tmp.65GYRNDIvl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UnF5fqbncY ++ cat /tmp/tmp.65GYRNDIvl ++ rm /tmp/tmp.UnF5fqbncY /tmp/tmp.65GYRNDIvl ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=55 + echo -n . .+ [[ 55 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e347NBY64J +++ mktemp ++ local LAST_ERR=/tmp/tmp.6mJAtP9wRa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e347NBY64J ++ cat /tmp/tmp.6mJAtP9wRa ++ rm /tmp/tmp.e347NBY64J /tmp/tmp.6mJAtP9wRa ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=56 + echo -n . 
.+ [[ 56 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rthlHAnWKy +++ mktemp ++ local LAST_ERR=/tmp/tmp.2sXlLanaHY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rthlHAnWKy ++ cat /tmp/tmp.2sXlLanaHY ++ rm /tmp/tmp.rthlHAnWKy /tmp/tmp.2sXlLanaHY ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=57 + echo -n . .+ [[ 57 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8fgmFdxoe5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eGYqUcRFQp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8fgmFdxoe5 ++ cat /tmp/tmp.eGYqUcRFQp ++ rm /tmp/tmp.8fgmFdxoe5 /tmp/tmp.eGYqUcRFQp ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=58 + echo -n . .+ [[ 58 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AVdTQSTFok +++ mktemp ++ local LAST_ERR=/tmp/tmp.cV0mPOASdP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AVdTQSTFok ++ cat /tmp/tmp.cV0mPOASdP ++ rm /tmp/tmp.AVdTQSTFok /tmp/tmp.cV0mPOASdP ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=59 + echo -n . .+ [[ 59 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CpP00eXsCJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.3QP1s9dQuU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CpP00eXsCJ ++ cat /tmp/tmp.3QP1s9dQuU ++ rm /tmp/tmp.CpP00eXsCJ /tmp/tmp.3QP1s9dQuU ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=60 + echo -n . .+ [[ 60 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TMA87B2rRK +++ mktemp ++ local LAST_ERR=/tmp/tmp.85ONhhP81V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TMA87B2rRK ++ cat /tmp/tmp.85ONhhP81V ++ rm /tmp/tmp.TMA87B2rRK /tmp/tmp.85ONhhP81V ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=61 + echo -n . .+ [[ 61 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3GBGbtDSFJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.NQZ7mN9SWj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3GBGbtDSFJ ++ cat /tmp/tmp.NQZ7mN9SWj ++ rm /tmp/tmp.3GBGbtDSFJ /tmp/tmp.NQZ7mN9SWj ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=62 + echo -n . 
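Before the final poll below succeeds, it is worth noting the shape of this wait: each dot is one iteration that fetches .status.state from the psmdb resource and gives up only past a ceiling (compared against 1500 in the trace). A minimal sketch of that pattern, using a hypothetical wait_for_ready helper rather than the suite's actual function:

# Poll the PSMDB custom resource once per second until it reports "ready".
# wait_for_ready is an illustrative name; the ceiling mirrors the 1500 seen above.
wait_for_ready() {
    local cluster=$1
    local max_wait=${2:-1500}
    local waited=0
    until [[ "$(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}')" == "ready" ]]; do
        if [[ $waited -gt $max_wait ]]; then
            echo "timeout: psmdb/$cluster did not become ready" >&2
            return 1
        fi
        sleep 1
        waited=$((waited + 1))
        echo -n .
    done
    echo
}

# Usage (hypothetical): wait_for_ready some-name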
.+ [[ 62 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c1iowzDOOk +++ mktemp ++ local LAST_ERR=/tmp/tmp.AteBSnre5s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.c1iowzDOOk ++ cat /tmp/tmp.AteBSnre5s ++ rm /tmp/tmp.c1iowzDOOk /tmp/tmp.AteBSnre5s ++ return 0 + [[ ready == ready ]] + echo + compare_kubectl statefulset/some-name-rs0 -tls-disabled + local resource=statefulset/some-name-rs0 + local postfix=-tls-disabled + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml + local new_result=/tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.iEAoclLyql ++ mktemp + local LAST_ERR=/tmp/tmp.OcZI7wtW1d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iEAoclLyql + cat /tmp/tmp.OcZI7wtW1d + rm /tmp/tmp.iEAoclLyql /tmp/tmp.OcZI7wtW1d + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml /tmp/tmp.J94EElvf6a/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-cfg -tls-disabled + local resource=statefulset/some-name-cfg + local postfix=-tls-disabled + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml + local new_result=/tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.YHZPfpsfnT ++ mktemp + local LAST_ERR=/tmp/tmp.p9vtfPISTF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YHZPfpsfnT + cat /tmp/tmp.p9vtfPISTF + rm /tmp/tmp.YHZPfpsfnT /tmp/tmp.p9vtfPISTF + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml /tmp/tmp.J94EElvf6a/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos -tls-disabled + local resource=statefulset/some-name-mongos + local postfix=-tls-disabled + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml + local new_result=/tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-20452", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.oSxNXQ973a ++ mktemp + local LAST_ERR=/tmp/tmp.csJ6dx1SDo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oSxNXQ973a + cat /tmp/tmp.csJ6dx1SDo + rm /tmp/tmp.oSxNXQ973a /tmp/tmp.csJ6dx1SDo + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml /tmp/tmp.J94EElvf6a/statefulset_some-name-mongos.yml + destroy tls-issue-cert-manager-20452 + local namespace=tls-issue-cert-manager-20452 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.GDM2Cxz6Ge ++ mktemp + local LAST_ERR=/tmp/tmp.9BRhwbM1uo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GDM2Cxz6Ge customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.9BRhwbM1uo + rm /tmp/tmp.GDM2Cxz6Ge /tmp/tmp.9BRhwbM1uo + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/crd.yaml ++ grep -v 
'\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.xELQARMa1n ++ mktemp + local LAST_ERR=/tmp/tmp.pzh78yU3Ry + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xELQARMa1n + cat /tmp/tmp.pzh78yU3Ry + rm /tmp/tmp.xELQARMa1n /tmp/tmp.pzh78yU3Ry + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.1gTDZUOjPq ++ mktemp + local LAST_ERR=/tmp/tmp.Oa1inlgaRO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1gTDZUOjPq + cat /tmp/tmp.Oa1inlgaRO + rm /tmp/tmp.1gTDZUOjPq /tmp/tmp.Oa1inlgaRO + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.6vvnC9xhG7 ++ mktemp + local LAST_ERR=/tmp/tmp.lkpYpAPoDA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6vvnC9xhG7 + cat /tmp/tmp.lkpYpAPoDA + rm /tmp/tmp.6vvnC9xhG7 /tmp/tmp.lkpYpAPoDA + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.vlWG5a0LzN ++ mktemp + local LAST_ERR=/tmp/tmp.8YxupJJ8Xa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1393/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vlWG5a0LzN clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.8YxupJJ8Xa + rm /tmp/tmp.vlWG5a0LzN /tmp/tmp.8YxupJJ8Xa + return 0 + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted serviceaccount "cert-manager" deleted serviceaccount "cert-manager-webhook" deleted configmap "cert-manager-webhook" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" 
deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted namespace "cert-manager" deleted + : + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace tls-issue-cert-manager-20452 + rm -rf /tmp/tmp.J94EElvf6a + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.DJJx0qqXhG + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed+ local LAST_OUT=/tmp/tmp.zc3j5BtS5h ----------------------------------------------------------------------------------- ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.t3q6EG62NM + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.XyAtLuNlAR + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace tls-issue-cert-manager-20452
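For reference, the statefulset comparisons earlier in this section (some-name-rs0, some-name-cfg and some-name-mongos against the *-tls-disabled.yml files) follow a fetch-normalize-diff pattern: dump the live object as YAML, strip fields that legitimately vary between runs with yq, then diff against the stored expectation. A minimal sketch under those assumptions; compare_resource is an illustrative name and the yq filter is a heavily shortened stand-in for the full expression in the log:

# Compare a live object against a golden file after removing run-specific fields.
compare_resource() {
    local resource=$1          # e.g. statefulset/some-name-rs0
    local expected=$2          # golden YAML checked into the repo
    local actual
    actual=$(mktemp)
    kubectl get -o yaml "$resource" \
        | yq eval '
            del(.metadata.managedFields) |
            del(.metadata.resourceVersion) |
            del(.. | select(has("uid")).uid) |
            del(.. | select(has("creationTimestamp")).creationTimestamp) |
            del(.status)
          ' - > "$actual"
    diff -u "$expected" "$actual"
}

# Usage (hypothetical): compare_resource statefulset/some-name-rs0 compare/statefulset_some-name-rs0-tls-disabled.yml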
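The destroy step's CRD cleanup also shows a common teardown trick: clear finalizers on any leftover custom resources so deletion cannot hang on the operator, then wait for the CRD itself to disappear. A rough sketch of that idea for a single CRD (the suite iterates over every name in deploy/crd.yaml and tolerates the resource type already being absent, as the "doesn't have a resource type" errors above show):

# Strip finalizers from any remaining backup objects, then wait for the CRD to go away.
crd=perconaservermongodbbackups.psmdb.percona.com
kubectl get "$crd" --all-namespaces -o wide 2>/dev/null | grep -v NAMESPACE \
    | while read -r ns name _; do
        kubectl patch "$crd" "$name" -n "$ns" --type=merge -p '{"metadata":{"finalizers":[]}}'
    done
kubectl wait --for=delete crd "$crd" || true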