Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/logs/tls-issue-cert-manager.log
WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1
+ main
+ create_infra tls-issue-cert-manager-11961
+ local ns=tls-issue-cert-manager-11961
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.dijlbJg3RM
++ mktemp
+ local LAST_ERR=/tmp/tmp.LGEH34Hhpt
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.dijlbJg3RM
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.LGEH34Hhpt
+ rm /tmp/tmp.dijlbJg3RM /tmp/tmp.LGEH34Hhpt
+ return 0
++ grep -v '\-\-\-'
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.wone1JbsSO
++ mktemp
+ local LAST_ERR=/tmp/tmp.nTYJGbsac5
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wone1JbsSO
+ cat /tmp/tmp.nTYJGbsac5
+ rm /tmp/tmp.wone1JbsSO /tmp/tmp.nTYJGbsac5
+ return 0
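# [editor's note] The get/xargs/patch sequence above clears finalizers from any leftover
# custom resources so that the CRD deletion started earlier cannot hang on them. A minimal
# standalone sketch of the same pattern (resource type taken from the trace above; the
# trailing ':' swallows the benign "doesn't have a resource type" error seen in this run):
kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
    | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
    || :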
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.dzWl0sLqVL
++ mktemp
+ local LAST_ERR=/tmp/tmp.PraIs8taf3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.dzWl0sLqVL
+ cat /tmp/tmp.PraIs8taf3
+ rm /tmp/tmp.dzWl0sLqVL /tmp/tmp.PraIs8taf3
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.gZHwmKvQ5C
++ mktemp
+ local LAST_ERR=/tmp/tmp.xHxILYyj94
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.gZHwmKvQ5C
+ cat /tmp/tmp.xHxILYyj94
+ rm /tmp/tmp.gZHwmKvQ5C /tmp/tmp.xHxILYyj94
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.6VFM9ylZUS
++ mktemp
+ local LAST_ERR=/tmp/tmp.zJSXhISI3O
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6VFM9ylZUS
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.zJSXhISI3O
+ rm /tmp/tmp.6VFM9ylZUS /tmp/tmp.zJSXhISI3O
+ return 0
+ check_crd_for_deletion PR-1598-171aada3
+ local git_tag=PR-1598-171aada3
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
++ /usr/bin/sed s/---//g
++ yq eval .metadata.name
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1598-171aada3/deploy/crd.yaml
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.67MlhJUxAB
+++ mktemp
++ local LAST_ERR=/tmp/tmp.SEO9pyDdYs
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.67MlhJUxAB
++ cat /tmp/tmp.SEO9pyDdYs
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
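# [editor's note] The "crd/null" lookups around this point are an artifact of
# check_crd_for_deletion: `yq eval .metadata.name` emits the literal `null` for the
# non-mapping documents in the multi-document crd.yaml, and that string reaches kubectl.
# A hedged sketch of the check being run (helper name from the trace; the exact body
# lives in the test suite's shared functions, not in this log):
check_crd_for_deletion() {
    local git_tag="$1"
    for crd_name in $(curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml" \
            | yq eval '.metadata.name' | sed 's/---//g'); do
        # a CRD whose newest status condition is Terminating would block re-creation
        if [[ $(kubectl get "crd/${crd_name}" -o 'jsonpath={.status.conditions[-1].type}') == 'Terminating' ]]; then
            echo "CRD ${crd_name} is stuck in Terminating state" >&2
        fi
    done
}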
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.67MlhJUxAB
++ cat /tmp/tmp.SEO9pyDdYs
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.67MlhJUxAB
++ cat /tmp/tmp.SEO9pyDdYs
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.67MlhJUxAB
++ cat /tmp/tmp.SEO9pyDdYs
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.67MlhJUxAB /tmp/tmp.SEO9pyDdYs
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ kubectl get clusterrolebinding
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrole
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ awk '{print$1}'
++ mktemp
+ xargs kubectl delete ns
++ mktemp
+ local LAST_OUT=/tmp/tmp.StSjhFyEOQ
+ local LAST_OUT=/tmp/tmp.AgO9tp1eNa
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.OWmmIwI2qa
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.oDdbmHpdhZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.AgO9tp1eNa
+ cat /tmp/tmp.OWmmIwI2qa
+ rm /tmp/tmp.AgO9tp1eNa /tmp/tmp.OWmmIwI2qa
+ return 0
namespace "cert-manager" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
namespace "tls-issue-cert-manager-7952" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.StSjhFyEOQ
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.oDdbmHpdhZ
+ rm /tmp/tmp.StSjhFyEOQ /tmp/tmp.oDdbmHpdhZ
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.wZ5nEvuYBj
++ mktemp
+ local LAST_ERR=/tmp/tmp.1ffOaO40Si
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wZ5nEvuYBj
+ cat /tmp/tmp.1ffOaO40Si
+ rm /tmp/tmp.wZ5nEvuYBj /tmp/tmp.1ffOaO40Si
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.ILomm6QiBi
++ mktemp
+ local LAST_ERR=/tmp/tmp.J3k0mzvtt1
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ILomm6QiBi
namespace/psmdb-operator created
+ cat /tmp/tmp.J3k0mzvtt1
+ rm /tmp/tmp.ILomm6QiBi /tmp/tmp.J3k0mzvtt1
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.0ymHkPYo6b
+++ mktemp
++ local LAST_ERR=/tmp/tmp.xPsVUJ0Lgn
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.0ymHkPYo6b
++ cat /tmp/tmp.xPsVUJ0Lgn
++ rm /tmp/tmp.0ymHkPYo6b /tmp/tmp.xPsVUJ0Lgn
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1598-171aada3-1-cluster8 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.pNjpeoT22c
++ mktemp
+ local LAST_ERR=/tmp/tmp.SHV9OWsMyB
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1598-171aada3-1-cluster8 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pNjpeoT22c
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1598-171aada3-1-cluster8" modified.
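# [editor's note] Every kubectl call in this log goes through the kubectl_bin wrapper whose
# xtrace repeats above (mktemp, seq 0 2, set +e, break, cat, rm). A hedged reconstruction of
# that retry loop, inferred purely from the trace; the real helper lives in the test suite's
# shared functions, and its exact retry guard ('[' ... -a -n 1 ']') is elided here:
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))   # observed backoff between attempts: 0s, 4s, 8s
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}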
+ cat /tmp/tmp.SHV9OWsMyB
+ rm /tmp/tmp.pNjpeoT22c /tmp/tmp.SHV9OWsMyB
+ return 0
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.gFZ1DlQLRh
++ mktemp
+ local LAST_ERR=/tmp/tmp.WXOT0s6f0e
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.gFZ1DlQLRh
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.WXOT0s6f0e
+ rm /tmp/tmp.gFZ1DlQLRh /tmp/tmp.WXOT0s6f0e
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ kubectl_bin apply -n psmdb-operator -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
++ mktemp
+ local LAST_OUT=/tmp/tmp.IfMZQOPUJ9
++ mktemp
+ local LAST_ERR=/tmp/tmp.ulIa24Nhjy
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.IfMZQOPUJ9
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.ulIa24Nhjy
+ rm /tmp/tmp.IfMZQOPUJ9 /tmp/tmp.ulIa24Nhjy
+ return 0
+ yq eval '
    (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1598-171aada3") |
    ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
    ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/cw-operator.yaml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.szK1buVtb9
++ mktemp
+ local LAST_ERR=/tmp/tmp.n2UsRooZGm
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.szK1buVtb9
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.n2UsRooZGm
+ rm /tmp/tmp.szK1buVtb9 /tmp/tmp.n2UsRooZGm
+ return 0
+ sleep 2
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.t4YUJ0wlJC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.jaRFrQSvwR
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.t4YUJ0wlJC
++ cat /tmp/tmp.jaRFrQSvwR
++ rm /tmp/tmp.t4YUJ0wlJC /tmp/tmp.jaRFrQSvwR
++ return 0
+ wait_pod percona-server-mongodb-operator-6b5f444cb7-q9dfp
+ local pod=percona-server-mongodb-operator-6b5f444cb7-q9dfp
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-6b5f444cb7-q9dfp to be ready.OK
+ create_namespace tls-issue-cert-manager-11961
+ local namespace=tls-issue-cert-manager-11961
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl get clusterrolebinding
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrole
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
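# [editor's note] destroy_chaos_mesh enumerates each resource kind, filters for chaos-mesh
# leftovers, and feeds the names to a time-boxed delete. When grep matches nothing, kubectl
# receives a kind but no names, which produces exactly the benign
# "error: resource(s) were provided, but no name was specified" seen above; the ':' no-op
# keeps the script alive under set -e. The pattern, condensed into one line:
timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep chaos-mesh | awk '{print $1}') || :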
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces tls-issue-cert-manager-11961'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces tls-issue-cert-manager-11961
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace tls-issue-cert-manager-11961 --ignore-not-found
++ mktemp
+ xargs kubectl delete ns
+ awk '{print$1}'
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ local LAST_OUT=/tmp/tmp.8wIyC0rNKc
++ mktemp
+ local LAST_OUT=/tmp/tmp.7iRBFMrwWq
++ mktemp
+ local LAST_ERR=/tmp/tmp.RlrMvWv6ga
+ local exit_status=0
+ local timeout=4
++ mktemp
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.dZT25gozqk
+ local exit_status=0
+ local timeout=4
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace tls-issue-cert-manager-11961 --ignore-not-found
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7iRBFMrwWq
+ cat /tmp/tmp.dZT25gozqk
+ rm /tmp/tmp.7iRBFMrwWq /tmp/tmp.dZT25gozqk
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8wIyC0rNKc
+ cat /tmp/tmp.RlrMvWv6ga
+ rm /tmp/tmp.8wIyC0rNKc /tmp/tmp.RlrMvWv6ga
+ return 0
+ kubectl_bin wait --for=delete namespace tls-issue-cert-manager-11961
++ mktemp
+ local LAST_OUT=/tmp/tmp.Omnb4MPFu5
++ mktemp
+ local LAST_ERR=/tmp/tmp.Kaah2ImGWZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace tls-issue-cert-manager-11961
namespace "gmp-public" deleted
namespace "gmp-system" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Omnb4MPFu5
+ cat /tmp/tmp.Kaah2ImGWZ
+ rm /tmp/tmp.Omnb4MPFu5 /tmp/tmp.Kaah2ImGWZ
+ return 0
+ desc 'create namespace tls-issue-cert-manager-11961'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace tls-issue-cert-manager-11961
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace tls-issue-cert-manager-11961
++ mktemp
+ local LAST_OUT=/tmp/tmp.3THbAtmZOS
++ mktemp
+ local LAST_ERR=/tmp/tmp.foSl6ZnAF3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace tls-issue-cert-manager-11961
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.3THbAtmZOS
namespace/tls-issue-cert-manager-11961 created
+ cat /tmp/tmp.foSl6ZnAF3
+ rm /tmp/tmp.3THbAtmZOS /tmp/tmp.foSl6ZnAF3
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.nsPf3nrvAJ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PAUhbBfKue
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.nsPf3nrvAJ
++ cat /tmp/tmp.PAUhbBfKue
++ rm /tmp/tmp.nsPf3nrvAJ /tmp/tmp.PAUhbBfKue
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1598-171aada3-1-cluster8 --namespace=tls-issue-cert-manager-11961
++ mktemp
+ local LAST_OUT=/tmp/tmp.WQx3zxon2G
++ mktemp
+ local LAST_ERR=/tmp/tmp.hOvdIeeEQA
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1598-171aada3-1-cluster8 --namespace=tls-issue-cert-manager-11961
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WQx3zxon2G
"gke_cloud-dev-112233_us-central1-a_jen-psmdb-1598-171aada3-1-cluster8" modified. + cat /tmp/tmp.hOvdIeeEQA + rm /tmp/tmp.WQx3zxon2G /tmp/tmp.hOvdIeeEQA + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.Igq9MWt1ev ++ mktemp + local LAST_ERR=/tmp/tmp.sOI7JzEicF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Igq9MWt1ev namespace/cert-manager created + cat /tmp/tmp.sOI7JzEicF + rm /tmp/tmp.Igq9MWt1ev /tmp/tmp.sOI7JzEicF + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.MAaggnKBC5 ++ mktemp + local LAST_ERR=/tmp/tmp.wn1PDggAoR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MAaggnKBC5 namespace/cert-manager labeled + cat /tmp/tmp.wn1PDggAoR + rm /tmp/tmp.MAaggnKBC5 /tmp/tmp.wn1PDggAoR + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.uUwIrUMuO0 ++ mktemp + local LAST_ERR=/tmp/tmp.05RnB6tsvq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uUwIrUMuO0 namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
+ cat /tmp/tmp.05RnB6tsvq
Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
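# [editor's note] cert-manager v1.15.1 is installed straight from the upstream release
# manifest; --validate=false skips client-side schema validation, commonly done to tolerate
# skew between client and server versions (this run reports a 1.30 client against a 1.27
# server). The warning above is expected: the cert-manager namespace was first created
# imperatively with `kubectl create`, so the first `kubectl apply` has to patch in the
# last-applied-configuration annotation itself.
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false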
+ rm /tmp/tmp.uUwIrUMuO0 /tmp/tmp.05RnB6tsvq
+ return 0
+ kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
++ mktemp
+ local LAST_OUT=/tmp/tmp.2AJn0tXGeg
++ mktemp
+ local LAST_ERR=/tmp/tmp.ToGdTTbwR2
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2AJn0tXGeg
pod/cert-manager-bd44d64d-kdxl2 condition met
pod/cert-manager-cainjector-7dcddbd8b9-x6x8s condition met
pod/cert-manager-webhook-6cc8fdfd7-rbtrj condition met
+ cat /tmp/tmp.ToGdTTbwR2
+ rm /tmp/tmp.2AJn0tXGeg /tmp/tmp.ToGdTTbwR2
+ return 0
+ sleep 120
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.S63PUXk1OF
++ mktemp
+ local LAST_ERR=/tmp/tmp.V79b3IWx1I
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.S63PUXk1OF
secret/some-users created
+ cat /tmp/tmp.V79b3IWx1I
+ rm /tmp/tmp.S63PUXk1OF /tmp/tmp.V79b3IWx1I
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/conf/client_with_tls.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.Bp9uAr9hi5
++ mktemp
+ local LAST_ERR=/tmp/tmp.1A5EY2htVa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/conf/client_with_tls.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Bp9uAr9hi5
deployment.apps/psmdb-client created
+ cat /tmp/tmp.1A5EY2htVa
+ rm /tmp/tmp.Bp9uAr9hi5 /tmp/tmp.1A5EY2htVa
+ return 0
+ desc 'create custom cert-manager issuers and certificates'
+ set +o xtrace
-----------------------------------------------------------------------------------
create custom cert-manager issuers and certificates
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.2zaMGpOuEF
++ mktemp
+ local LAST_ERR=/tmp/tmp.bqsk4b2JI6
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.2zaMGpOuEF
issuer.cert-manager.io/some-name-psmdb-ca-issuer created
+ cat /tmp/tmp.bqsk4b2JI6
+ rm /tmp/tmp.2zaMGpOuEF /tmp/tmp.bqsk4b2JI6
+ return 0
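# [editor's note] The five manifests applied in this step build the custom TLS chain under
# test: some-name-psmdb-ca-issuer signs some-name-ca-cert, and some-name-psmdb-issuer then
# issues the ssl and ssl-internal certificates from that CA. The conf/*.yml bodies are not
# part of this log; a hypothetical minimal first link, for orientation only:
kubectl apply -f - <<'EOF'
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: some-name-psmdb-ca-issuer
spec:
  selfSigned: {}   # assumption: the CA issuer is self-signed; not confirmed by this log
EOF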
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.vLnlpvS9fD
++ mktemp
+ local LAST_ERR=/tmp/tmp.LESL2zXeDW
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.vLnlpvS9fD
issuer.cert-manager.io/some-name-psmdb-issuer created
+ cat /tmp/tmp.LESL2zXeDW
+ rm /tmp/tmp.vLnlpvS9fD /tmp/tmp.LESL2zXeDW
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.6E5r1tX5Hs
++ mktemp
+ local LAST_ERR=/tmp/tmp.omxOmJ1xYB
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6E5r1tX5Hs
certificate.cert-manager.io/some-name-ca-cert created
+ cat /tmp/tmp.omxOmJ1xYB
+ rm /tmp/tmp.6E5r1tX5Hs /tmp/tmp.omxOmJ1xYB
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.qhGvUqngoG
++ mktemp
+ local LAST_ERR=/tmp/tmp.dxYZ93zj8I
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qhGvUqngoG
certificate.cert-manager.io/some-name-ssl-internal created
+ cat /tmp/tmp.dxYZ93zj8I
+ rm /tmp/tmp.qhGvUqngoG /tmp/tmp.dxYZ93zj8I
+ return 0
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.Ax6ShV7j4N
++ mktemp
+ local LAST_ERR=/tmp/tmp.eIsMZKDwa7
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Ax6ShV7j4N
certificate.cert-manager.io/some-name-ssl created
+ cat /tmp/tmp.eIsMZKDwa7
+ rm /tmp/tmp.Ax6ShV7j4N /tmp/tmp.eIsMZKDwa7
+ return 0
+ deploy_cmctl
+ local service_account=cmctl
+ yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"'
+ /usr/bin/sed -e s/percona-server-mongodb-operator/cmctl/g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/rbac.yaml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.bShs6fPwUk
++ mktemp
+ local LAST_ERR=/tmp/tmp.VFnQjUVX3f
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bShs6fPwUk
role.rbac.authorization.k8s.io/cmctl created
serviceaccount/cmctl created
rolebinding.rbac.authorization.k8s.io/service-account-cmctl created
+ cat /tmp/tmp.VFnQjUVX3f
+ rm /tmp/tmp.bShs6fPwUk /tmp/tmp.VFnQjUVX3f
+ return 0
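# [editor's note] deploy_cmctl reuses the operator's namespaced deploy/rbac.yaml: sed renames
# every percona-server-mongodb-operator identifier to cmctl, and the yq expression traced
# above appends certificates/status to the cert-manager.io rule so cmctl can approve
# CertificateRequests. The equivalent standalone pipeline (paths relative to the checkout):
sed -e 's/percona-server-mongodb-operator/cmctl/g' deploy/rbac.yaml \
    | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' \
    | kubectl apply -f -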
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/conf/cmctl.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.8RSzH39Km7
++ mktemp
+ local LAST_ERR=/tmp/tmp.nk3zPlI3xj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/conf/cmctl.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8RSzH39Km7
deployment.apps/cmctl created
+ cat /tmp/tmp.nk3zPlI3xj
+ rm /tmp/tmp.8RSzH39Km7 /tmp/tmp.nk3zPlI3xj
+ return 0
+ sleep 60
+ cluster=some-name
+ desc 'create first PSMDB cluster some-name'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PSMDB cluster some-name
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name.yml
++ mktemp
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ local LAST_OUT=/tmp/tmp.oyQrB1ob7B
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1598-171aada3"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
++ mktemp
+ local LAST_ERR=/tmp/tmp.mpsyZG03Vw
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.oyQrB1ob7B
perconaservermongodb.psmdb.percona.com/some-name created
+ cat /tmp/tmp.mpsyZG03Vw
+ rm /tmp/tmp.oyQrB1ob7B /tmp/tmp.mpsyZG03Vw
+ return 0
+ desc 'check if all Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all Pods started
-----------------------------------------------------------------------------------
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.............OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready.............OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.wEVViO9ZqJ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.hJrFsX47vm
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.wEVViO9ZqJ
++ cat /tmp/tmp.hJrFsX47vm
++ rm /tmp/tmp.wEVViO9ZqJ /tmp/tmp.hJrFsX47vm
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready.............OK
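# [editor's note] The "waiting for pod/... to be ready.....OK" lines are produced by the
# wait_pod helper, which polls readiness and prints one dot per attempt. A hedged
# reconstruction from the trace (the retry limit and the probed condition are assumptions):
wait_pod() {
    local pod="$1"
    local retries=0
    echo -n "waiting for pod/${pod} to be ready"
    until [[ $(kubectl get pod/"$pod" -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null) == "True" ]]; do
        echo -n .
        retries=$((retries + 1))
        if [ "$retries" -ge 360 ]; then
            echo "pod/${pod} never became ready" >&2
            return 1
        fi
        sleep 1
    done
    echo .OK
}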
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.3deGEuWNRC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.lF2emEL1Jf
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.3deGEuWNRC
++ cat /tmp/tmp.lF2emEL1Jf
++ rm /tmp/tmp.3deGEuWNRC /tmp/tmp.lF2emEL1Jf
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness.........................
+ wait_for_running some-name-cfg 3 false
+ local name=some-name-cfg
+ let last_pod=2
+ local check_cluster_readyness=false
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=cfg
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-cfg-0
+ local pod=some-name-cfg-0
+ set +o xtrace
waiting for pod/some-name-cfg-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-cfg-1
+ local pod=some-name-cfg-1
+ set +o xtrace
waiting for pod/some-name-cfg-1 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.drR3wYRYtC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.uZ0udfdaBR
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.drR3wYRYtC
++ cat /tmp/tmp.uZ0udfdaBR
++ rm /tmp/tmp.drR3wYRYtC /tmp/tmp.uZ0udfdaBR
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-cfg-2
+ local pod=some-name-cfg-2
+ set +o xtrace
waiting for pod/some-name-cfg-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.xo9LX460ya
+++ mktemp
++ local LAST_ERR=/tmp/tmp.qDGMYas3qA
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.xo9LX460ya
++ cat /tmp/tmp.qDGMYas3qA
++ rm /tmp/tmp.xo9LX460ya /tmp/tmp.qDGMYas3qA
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ false == \t\r\u\e ]]
+ wait_for_running some-name-mongos 3
+ local name=some-name-mongos
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=mongos
+ local cluster_name=some-name
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-mongos-0
+ local pod=some-name-mongos-0
+ set +o xtrace
waiting for pod/some-name-mongos-0 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-mongos-1
+ local pod=some-name-mongos-1
+ set +o xtrace
waiting for pod/some-name-mongos-1 to be ready.OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.zKWWYiUI6D
+++ mktemp
++ local LAST_ERR=/tmp/tmp.x50DkrfwDv
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.zKWWYiUI6D
++ cat /tmp/tmp.x50DkrfwDv
++ rm /tmp/tmp.zKWWYiUI6D /tmp/tmp.x50DkrfwDv
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-mongos-2
+ local pod=some-name-mongos-2
+ set +o xtrace
waiting for pod/some-name-mongos-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.VfMxWSY8YY
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bzKQQQgC5l
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.VfMxWSY8YY
++ cat /tmp/tmp.bzKQQQgC5l
++ rm /tmp/tmp.VfMxWSY8YY /tmp/tmp.bzKQQQgC5l
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ desc 'compare custom certificates and issuers'
+ set +o xtrace
-----------------------------------------------------------------------------------
compare custom certificates and issuers
-----------------------------------------------------------------------------------
+ compare_kubectl certificate/some-name-ssl -custom
+ local resource=certificate/some-name-ssl
+ local postfix=-custom
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml
+ local new_result=/tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom-oc.yml ']'
+ kubectl_bin get -o yaml certificate/some-name-ssl
+ yq eval '
    del(.metadata.ownerReferences[].apiVersion) |
    del(.metadata.managedFields) |
    del(.. | select(has("creationTimestamp")).creationTimestamp) |
    del(.. | select(has("namespace")).namespace) |
    del(.. | select(has("uid")).uid) |
    del(.metadata.resourceVersion) |
    del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
    del(.metadata.selfLink) |
    del(.metadata.annotations."cloud.google.com/neg") |
    del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
    del(.. | select(has("image")).image) |
    del(.. | select(has("clusterIP")).clusterIP) |
    del(.. | select(has("clusterIPs")).clusterIPs) |
    del(.. | select(has("dataSource")).dataSource) |
    del(.. | select(has("procMount")).procMount) |
    del(.. | select(has("storageClassName")).storageClassName) |
    del(.. | select(has("finalizers")).finalizers) |
    del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
    del(.. | select(has("volumeName")).volumeName) |
    del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
    del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
    del(.spec.volumeMode) |
    del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
    del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
    del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
    del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
    del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
    del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.cdBEbtHFd9 ++ mktemp + local LAST_ERR=/tmp/tmp.lCFZUneKNr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cdBEbtHFd9 + cat /tmp/tmp.lCFZUneKNr + rm /tmp/tmp.cdBEbtHFd9 /tmp/tmp.lCFZUneKNr + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-custom.yml /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + compare_kubectl certificate/some-name-ssl-internal -custom + local resource=certificate/some-name-ssl-internal + local postfix=-custom + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml + local new_result=/tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vnseM5YFnl ++ mktemp + local LAST_ERR=/tmp/tmp.sifVAk9fxP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vnseM5YFnl + cat /tmp/tmp.sifVAk9fxP + rm /tmp/tmp.vnseM5YFnl /tmp/tmp.sifVAk9fxP + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-custom.yml /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + compare_kubectl certificate/some-name-ca-cert -custom + local resource=certificate/some-name-ca-cert + local postfix=-custom + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml + local new_result=/tmp/tmp.1McPIVQius/certificate_some-name-ca-cert.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ca-cert + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.fkkGvl71hG ++ mktemp + local LAST_ERR=/tmp/tmp.3U7G7yZrdR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ca-cert + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fkkGvl71hG + cat /tmp/tmp.3U7G7yZrdR + rm /tmp/tmp.fkkGvl71hG /tmp/tmp.3U7G7yZrdR + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ca-cert.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ca-cert.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/certificate_some-name-ca-cert.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ca-cert-custom.yml /tmp/tmp.1McPIVQius/certificate_some-name-ca-cert.yml + compare_kubectl issuer/some-name-psmdb-ca-issuer -custom + local resource=issuer/some-name-psmdb-ca-issuer + local postfix=-custom + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml + local new_result=/tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.JW7kY2J1mo ++ mktemp + local LAST_ERR=/tmp/tmp.UEoAVSAImQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JW7kY2J1mo + cat /tmp/tmp.UEoAVSAImQ + rm /tmp/tmp.JW7kY2J1mo /tmp/tmp.UEoAVSAImQ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-custom.yml /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + compare_kubectl issuer/some-name-psmdb-issuer -custom + local resource=issuer/some-name-psmdb-issuer + local postfix=-custom + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml + local new_result=/tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.rHPAtxyd6J ++ mktemp + local LAST_ERR=/tmp/tmp.Jsa60EUEKm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rHPAtxyd6J + cat /tmp/tmp.Jsa60EUEKm + rm /tmp/tmp.rHPAtxyd6J /tmp/tmp.Jsa60EUEKm + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-custom.yml /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + desc 'delete cluster' + set +o xtrace ----------------------------------------------------------------------------------- delete cluster ----------------------------------------------------------------------------------- + kubectl delete psmdb --all perconaservermongodb.psmdb.percona.com "some-name" deleted + kubectl delete pvc --all persistentvolumeclaim "mongod-data-some-name-cfg-0" deleted persistentvolumeclaim "mongod-data-some-name-cfg-1" deleted persistentvolumeclaim "mongod-data-some-name-cfg-2" deleted persistentvolumeclaim "mongod-data-some-name-rs0-0" deleted persistentvolumeclaim "mongod-data-some-name-rs0-1" deleted persistentvolumeclaim "mongod-data-some-name-rs0-2" deleted + desc 'delete custom cert-manager issuers and certificates' + set +o xtrace ----------------------------------------------------------------------------------- delete custom cert-manager issuers and certificates ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Wmn1KswIZ5 ++ mktemp + local LAST_ERR=/tmp/tmp.Ud3VxIA5aV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-ca-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Wmn1KswIZ5 issuer.cert-manager.io "some-name-psmdb-ca-issuer" deleted + cat /tmp/tmp.Ud3VxIA5aV + rm /tmp/tmp.Wmn1KswIZ5 /tmp/tmp.Ud3VxIA5aV + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml ++ mktemp + local LAST_OUT=/tmp/tmp.OD7zldHjwq ++ mktemp + local LAST_ERR=/tmp/tmp.Skt00K6jWk + local exit_status=0 
+ local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-psmdb-issuer.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OD7zldHjwq issuer.cert-manager.io "some-name-psmdb-issuer" deleted + cat /tmp/tmp.Skt00K6jWk + rm /tmp/tmp.OD7zldHjwq /tmp/tmp.Skt00K6jWk + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml ++ mktemp + local LAST_OUT=/tmp/tmp.uTeycQ7037 ++ mktemp + local LAST_ERR=/tmp/tmp.6vt9xHcaAc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ca-cert.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uTeycQ7037 certificate.cert-manager.io "some-name-ca-cert" deleted + cat /tmp/tmp.6vt9xHcaAc + rm /tmp/tmp.uTeycQ7037 /tmp/tmp.6vt9xHcaAc + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml ++ mktemp + local LAST_OUT=/tmp/tmp.oNljetpckL ++ mktemp + local LAST_ERR=/tmp/tmp.LeICGrz2qq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl-internal.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oNljetpckL certificate.cert-manager.io "some-name-ssl-internal" deleted + cat /tmp/tmp.LeICGrz2qq + rm /tmp/tmp.oNljetpckL /tmp/tmp.LeICGrz2qq + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.nUltoeCMFD ++ mktemp + local LAST_ERR=/tmp/tmp.XqTLSrAAfj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name-ssl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nUltoeCMFD certificate.cert-manager.io "some-name-ssl" deleted + cat /tmp/tmp.XqTLSrAAfj + rm /tmp/tmp.nUltoeCMFD /tmp/tmp.XqTLSrAAfj + return 0 + sleep 30 + desc 'delete ssl secrets, operator should recreate them' + set +o xtrace ----------------------------------------------------------------------------------- delete ssl secrets, operator should recreate them ----------------------------------------------------------------------------------- + kubectl_bin delete secret some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.8vQSYdgdjC ++ mktemp + local LAST_ERR=/tmp/tmp.VFaBWMDZUa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete secret some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8vQSYdgdjC secret "some-name-ssl-internal" deleted + cat /tmp/tmp.VFaBWMDZUa + rm /tmp/tmp.8vQSYdgdjC /tmp/tmp.VFaBWMDZUa + return 0 + kubectl_bin delete secret some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.1nkF2Z8pTK ++ mktemp + local LAST_ERR=/tmp/tmp.zrgg14sFhM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete secret some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.1nkF2Z8pTK secret "some-name-ssl" deleted + cat /tmp/tmp.zrgg14sFhM + rm /tmp/tmp.1nkF2Z8pTK /tmp/tmp.zrgg14sFhM + return 0 + sleep 30 + desc 'recreate PSMDB cluster some-name' + set +o xtrace ----------------------------------------------------------------------------------- recreate PSMDB cluster some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + local LAST_OUT=/tmp/tmp.WRUIRp64jz + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1598-171aada3"' ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.sSxFqSQO0p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WRUIRp64jz perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.sSxFqSQO0p + rm /tmp/tmp.WRUIRp64jz /tmp/tmp.sSxFqSQO0p + return 0 + desc 'check if all Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J9DauwuViC +++ mktemp ++ local LAST_ERR=/tmp/tmp.fSVPBRLuPD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J9DauwuViC ++ cat /tmp/tmp.fSVPBRLuPD ++ rm /tmp/tmp.J9DauwuViC /tmp/tmp.fSVPBRLuPD ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready..............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eM9RRLfaMJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ykialyrIFl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in 
'$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eM9RRLfaMJ ++ cat /tmp/tmp.ykialyrIFl ++ rm /tmp/tmp.eM9RRLfaMJ /tmp/tmp.ykialyrIFl ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ilvWtnI40u +++ mktemp ++ local LAST_ERR=/tmp/tmp.0wXFikqTND ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ilvWtnI40u ++ cat /tmp/tmp.0wXFikqTND ++ rm /tmp/tmp.ilvWtnI40u /tmp/tmp.0wXFikqTND ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zDpyg2yDOO +++ mktemp ++ local LAST_ERR=/tmp/tmp.BYfZaNcKeP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zDpyg2yDOO ++ cat /tmp/tmp.BYfZaNcKeP ++ rm /tmp/tmp.zDpyg2yDOO /tmp/tmp.BYfZaNcKeP ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EHOJKklAbB +++ mktemp ++ local LAST_ERR=/tmp/tmp.uURP4NPhb4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EHOJKklAbB ++ cat /tmp/tmp.uURP4NPhb4 ++ rm /tmp/tmp.EHOJKklAbB /tmp/tmp.uURP4NPhb4 ++ return 0 + [[ '' 
== \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hkU4bk7nra +++ mktemp ++ local LAST_ERR=/tmp/tmp.beIN0WKo5c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hkU4bk7nra ++ cat /tmp/tmp.beIN0WKo5c ++ rm /tmp/tmp.hkU4bk7nra /tmp/tmp.beIN0WKo5c ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KyRDGrEKuM ++ mktemp + local LAST_ERR=/tmp/tmp.ZxgU8R34wU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KyRDGrEKuM + cat /tmp/tmp.ZxgU8R34wU + rm /tmp/tmp.KyRDGrEKuM /tmp/tmp.ZxgU8R34wU + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0.yml /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.8xBXkkkm2C ++ mktemp + local LAST_ERR=/tmp/tmp.nS9oKopQm9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8xBXkkkm2C + cat /tmp/tmp.nS9oKopQm9 + rm /tmp/tmp.8xBXkkkm2C /tmp/tmp.nS9oKopQm9 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg.yml /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos + local resource=statefulset/some-name-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1C56GesGmy ++ mktemp + local LAST_ERR=/tmp/tmp.Btb9yEjjIJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1C56GesGmy + cat /tmp/tmp.Btb9yEjjIJ + rm /tmp/tmp.1C56GesGmy /tmp/tmp.Btb9yEjjIJ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos.yml /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + desc 'check if certificates issued with certmanager' + set +o xtrace ----------------------------------------------------------------------------------- check if certificates issued with certmanager ----------------------------------------------------------------------------------- + check_tls_secret some-name-ssl + local secret_name=some-name-ssl + check_secret_data_key some-name-ssl ca.crt + local secret_name=some-name-ssl + local data_key=ca.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["ca.crt"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JkQEtCKI6i +++ mktemp ++ local LAST_ERR=/tmp/tmp.Oj0jUqO9JH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JkQEtCKI6i ++ cat /tmp/tmp.Oj0jUqO9JH ++ rm /tmp/tmp.JkQEtCKI6i /tmp/tmp.Oj0jUqO9JH ++ return 0 + 
secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrekNDQWVPZ0F3SUJBZ0lSQUt5N21hQ3FMMmhiQXRjQ08rZWRMeWt3RFFZSktvWklodmNOQVFFTEJRQXcKRnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNQjRYRFRJME1EY3hPVEU1TlRjME9Wb1hEVEkxTURjeApPVEU1TlRjME9Wb3dGekVWTUJNR0ExVUVBeE1NYzI5dFpTMXVZVzFsTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCCkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTF4R2xLb1hsQlcwV1hKT25MV0hDSHlBbFlWRkFVVHRMMjQwSUpuVFcKVG5wVzBReUJuOEpHTVgxanpTSEhmRUpqc01zazVZWDB0VG1Kem1TU09qNW1JckJBOCtObFlZaW9rSy9rN0gwWQpPUGlEN1d2bi9ZZFlpMHV0amtlVWNtaUIyZXJGaFFkLzlBQk9haEFIQjNZVEtsK2k4ZHVwMHVyQ0NmWnNWUUlsCkh1SUJNSlJDRG9ucFJEQnBjdnhTb3dFbGJRZ1R1cjUzQ2hHOHFXUGhiZUNtREVEMnNPNmtURE9FRjhUSlpMaGUKMGRMc3NqZVh4UnBEaytzREo4amc3SHBXWFppSlhQVCtHNDA2U2NXYkNtM05uWFUydTVLZ01xNHRJK2xXUVJTcApnSExoa2lYQjhNZnNOV0Z4aS9ya1FGbjhyb0ZHcWcvRkFmZHVaMWFjVnJsK1FRSURBUUFCbzBJd1FEQU9CZ05WCkhROEJBZjhFQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVV3UUs2VkRlK2NQM1QKSGtNTk54cnBJd0JUekRVd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFFZDdQR2RvcEVmd3pPeFRZSHl4bGwyRgplYnZ5YzE2NENmY0lQOEo5alU5SE5KcHFRVmNwekRkRnhIR0doL1VuUUpBSzUzRjBHS2tZZFdvWGNFdFhnSFd6CllEai9BcG9aeitJMUltYWdsKzhiekVucXR0V3ZjTFhxQjJqUmNmQnorNVEvRlZ1Q21jNkZOUzVubnBEbDAvankKN3ZFRFNkVElXUEtYelNGRVpTUEh6WkFEVG9ubXUxdHhPNnJORkM5bkVhdnRSWnM3UUVwYkk5bzBoUE9uTmRCZgozaDNjbkprNUZaRTdKMUhYaEdzelBuWDA0Q3VjVEZKd09ZeEw4RXlkTXdBQ1d1MkREUUZxYWV6VDdENFZ1OWVJCmNDZkpsamRNY09FRDNXYktKdzR1ZkpsWFpRbTI3dmZ1TXNlR0xxcHllTnQ2Y2JzZTEzRXJGcWdabklHZzhQbz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrekNDQWVPZ0F3SUJBZ0lSQUt5N21hQ3FMMmhiQXRjQ08rZWRMeWt3RFFZSktvWklodmNOQVFFTEJRQXcKRnpFVk1CTUdBMVVFQXhNTWMyOXRaUzF1WVcxbExXTmhNQjRYRFRJME1EY3hPVEU1TlRjME9Wb1hEVEkxTURjeApPVEU1TlRjME9Wb3dGekVWTUJNR0ExVUVBeE1NYzI5dFpTMXVZVzFsTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCCkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTF4R2xLb1hsQlcwV1hKT25MV0hDSHlBbFlWRkFVVHRMMjQwSUpuVFcKVG5wVzBReUJuOEpHTVgxanpTSEhmRUpqc01zazVZWDB0VG1Kem1TU09qNW1JckJBOCtObFlZaW9rSy9rN0gwWQpPUGlEN1d2bi9ZZFlpMHV0amtlVWNtaUIyZXJGaFFkLzlBQk9haEFIQjNZVEtsK2k4ZHVwMHVyQ0NmWnNWUUlsCkh1SUJNSlJDRG9ucFJEQnBjdnhTb3dFbGJRZ1R1cjUzQ2hHOHFXUGhiZUNtREVEMnNPNmtURE9FRjhUSlpMaGUKMGRMc3NqZVh4UnBEaytzREo4amc3SHBXWFppSlhQVCtHNDA2U2NXYkNtM05uWFUydTVLZ01xNHRJK2xXUVJTcApnSExoa2lYQjhNZnNOV0Z4aS9ya1FGbjhyb0ZHcWcvRkFmZHVaMWFjVnJsK1FRSURBUUFCbzBJd1FEQU9CZ05WCkhROEJBZjhFQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVV3UUs2VkRlK2NQM1QKSGtNTk54cnBJd0JUekRVd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFFZDdQR2RvcEVmd3pPeFRZSHl4bGwyRgplYnZ5YzE2NENmY0lQOEo5alU5SE5KcHFRVmNwekRkRnhIR0doL1VuUUpBSzUzRjBHS2tZZFdvWGNFdFhnSFd6CllEai9BcG9aeitJMUltYWdsKzhiekVucXR0V3ZjTFhxQjJqUmNmQnorNVEvRlZ1Q21jNkZOUzVubnBEbDAvankKN3ZFRFNkVElXUEtYelNGRVpTUEh6WkFEVG9ubXUxdHhPNnJORkM5bkVhdnRSWnM3UUVwYkk5bzBoUE9uTmRCZgozaDNjbkprNUZaRTdKMUhYaEdzelBuWDA0Q3VjVEZKd09ZeEw4RXlkTXdBQ1d1MkREUUZxYWV6VDdENFZ1OWVJCmNDZkpsamRNY09FRDNXYktKdzR1ZkpsWFpRbTI3dmZ1TXNlR0xxcHllTnQ2Y2JzZTEzRXJGcWdabklHZzhQbz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' ']' + check_secret_data_key some-name-ssl tls.crt + local secret_name=some-name-ssl + local data_key=tls.crt + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json +++ mktemp ++ jq '.data["tls.crt"]' ++ local LAST_OUT=/tmp/tmp.zDAprNxLYm +++ mktemp ++ local LAST_ERR=/tmp/tmp.e5itza7MpT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zDAprNxLYm ++ cat /tmp/tmp.e5itza7MpT ++ rm /tmp/tmp.zDAprNxLYm /tmp/tmp.e5itza7MpT ++ 
return 0 + secret_data='"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg1ekNDQnMrZ0F3SUJBZ0lRV0ZJUEVpK241SHU4VUZVaWJpckx6REFOQmdrcWhraUc5dzBCQVFzRkFEQVgKTVJVd0V3WURWUVFERXd4emIyMWxMVzVoYldVdFkyRXdIaGNOTWpRd056RTVNakF3TXpFMldoY05NalF4TURFMwpNakF3TXpFMldqQWtNUTR3REFZRFZRUUtFd1ZRVTAxRVFqRVNNQkFHQTFVRUF4TUpjMjl0WlMxdVlXMWxNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1eWlXaGVLOEF6UTUyajV6UTFsYkNoYWMKdFIwVCtpdzhweFZibzBhcWhTUlhoQ3dUdHdCN3VUTnpCak1UeFVOYzViRHlqZ2NCVkdaaEw5UmZBTk5iWmNUMgovN1FHYU0yVlRHaThNZ2l3aXVpMVR1b1kvRFRqVmxRckF5TlpvR2NjbDh4bGRyQi82ck9tamMyYUhxdmN4Vm1TClk5NHdkVlcvakNXa0dMNnhlUWl6WkxZVm50eHZ3am1tRFVLWmZlemZpV3pVclJ4WEhYakJsejJFSWNwTU5pdzgKZUR3QVdXWVNhMDZ1N1dvQUVDYk1hWG95dlY2N2F0WVRTa2EvNHFnM0lGV2ZaWEVlWitNaXlvckU2a0VvSkVTKwpQazlDTGJMNXVJTzlVVTJqVFF1Lys2TnhwckNTS1V3YkphcnJWZ0VkYjhsUGFmUWx2ZkF3eWVYbUFYc2Mxd0lECkFRQUJvNElGSURDQ0JSd3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01Bd0dBMVVkRXdFQi93UUNNQUF3SHdZRFZSMGoKQkJnd0ZvQVV3UUs2VkRlK2NQM1RIa01OTnhycEl3QlR6RFV3Z2dUWkJnTlZIUkVFZ2dUUU1JSUV6SUlKYkc5agpZV3hvYjNOMGdnMXpiMjFsTFc1aGJXVXRjbk13Z2lwemIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqClpYSjBMVzFoYm1GblpYSXRNVEU1TmpHQ1BITnZiV1V0Ym1GdFpTMXljekF1ZEd4ekxXbHpjM1ZsTFdObGNuUXQKYldGdVlXZGxjaTB4TVRrMk1TNXpkbU11WTJ4MWMzUmxjaTVzYjJOaGJJSVBLaTV6YjIxbExXNWhiV1V0Y25NdwpnaXdxTG5OdmJXVXRibUZ0WlMxeWN6QXVkR3h6TFdsemMzVmxMV05sY25RdGJXRnVZV2RsY2kweE1UazJNWUkrCktpNXpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVEU1TmpFdWMzWmoKTG1Oc2RYTjBaWEl1Ykc5allXeUNQM052YldVdGJtRnRaUzF5Y3pBdWRHeHpMV2x6YzNWbExXTmxjblF0YldGdQpZV2RsY2kweE1UazJNUzV6ZG1NdVkyeDFjM1JsY25ObGRDNXNiMk5oYklKQktpNXpiMjFsTFc1aGJXVXRjbk13CkxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TVRFNU5qRXVjM1pqTG1Oc2RYTjBaWEp6WlhRdWJHOWoKWVd5Q015b3VkR3h6TFdsemMzVmxMV05sY25RdGJXRnVZV2RsY2kweE1UazJNUzV6ZG1NdVkyeDFjM1JsY25ObApkQzVzYjJOaGJJSVFjMjl0WlMxdVlXMWxMVzF2Ym1kdmM0SXRjMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTBiSE10CmFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURXhPVFl4Z2o5emIyMWxMVzVoYldVdGJXOXVaMjl6TG5Sc2N5MXAKYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TVRFNU5qRXVjM1pqTG1Oc2RYTjBaWEl1Ykc5allXeUNFaW91YzI5dApaUzF1WVcxbExXMXZibWR2YzRJdktpNXpiMjFsTFc1aGJXVXRiVzl1WjI5ekxuUnNjeTFwYzNOMVpTMWpaWEowCkxXMWhibUZuWlhJdE1URTVOakdDUVNvdWMyOXRaUzF1WVcxbExXMXZibWR2Y3k1MGJITXRhWE56ZFdVdFkyVnkKZEMxdFlXNWhaMlZ5TFRFeE9UWXhMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNnZzF6YjIxbExXNWhiV1V0WTJabgpnaXB6YjIxbExXNWhiV1V0WTJabkxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TVRFNU5qR0NQSE52CmJXVXRibUZ0WlMxalptY3VkR3h6TFdsemMzVmxMV05sY25RdGJXRnVZV2RsY2kweE1UazJNUzV6ZG1NdVkyeDEKYzNSbGNpNXNiMk5oYklJUEtpNXpiMjFsTFc1aGJXVXRZMlpuZ2l3cUxuTnZiV1V0Ym1GdFpTMWpabWN1ZEd4egpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TVRrMk1ZSStLaTV6YjIxbExXNWhiV1V0WTJabkxuUnNjeTFwCmMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1URTVOakV1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDUW5OdmJXVXQKYm1GdFpTMXRiMjVuYjNNdWRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TVRrMk1TNXpkbU11WTJ4MQpjM1JsY25ObGRDNXNiMk5oYklKRUtpNXpiMjFsTFc1aGJXVXRiVzl1WjI5ekxuUnNjeTFwYzNOMVpTMWpaWEowCkxXMWhibUZuWlhJdE1URTVOakV1YzNaakxtTnNkWE4wWlhKelpYUXViRzlqWVd5Q1AzTnZiV1V0Ym1GdFpTMWoKWm1jdWRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TVRrMk1TNXpkbU11WTJ4MWMzUmxjbk5sZEM1cwpiMk5oYklKQktpNXpiMjFsTFc1aGJXVXRZMlpuTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVEU1Ck5qRXVjM1pqTG1Oc2RYTjBaWEp6WlhRdWJHOWpZV3d3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUdxRURtMFoKcm9NbVVMVm5LT3MrYzRjbWJUdmRJTUprR0dRV0hpT2FtUlkvcjRzU3JyUUdPMGdFYmxOWFNvSG9QcEZZY3l0NwpVdUpoSXJHcUt5Rmplc0pzWWkzVTVJVFBCbjBZdG9ucGlqbHhIOXN2K0g2emtodkEwM2M3TktzNXR6bUZJT212CmE2bTFIRk5ZaHdUeXR4UWU4SFZWZzM2T3lDQktiNFJXN01hUGNNOEsvV0hJSU5vNmg4Yk5YMHhVWUx5Nk5RRUwKdVlMc0JlaDZCcHJmcXlrLzR6ZE
FQWDk5bGRTMTJsN2Ird2t3YiswSUFTQ0NubXBYTm9yM2IwQ1pOVCtETWNaOApIckVIZDNDUGZxTEpPNWErODNUWktBcTc2T1R0bWp1Z3FSTWpFMVg2QUVEbDREc2hwUDR2a3Vua2gxU0NOdnBOCktBRTBoa3FSbDRUeWRsUT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' + '[' -z '"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUg1ekNDQnMrZ0F3SUJBZ0lRV0ZJUEVpK241SHU4VUZVaWJpckx6REFOQmdrcWhraUc5dzBCQVFzRkFEQVgKTVJVd0V3WURWUVFERXd4emIyMWxMVzVoYldVdFkyRXdIaGNOTWpRd056RTVNakF3TXpFMldoY05NalF4TURFMwpNakF3TXpFMldqQWtNUTR3REFZRFZRUUtFd1ZRVTAxRVFqRVNNQkFHQTFVRUF4TUpjMjl0WlMxdVlXMWxNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1eWlXaGVLOEF6UTUyajV6UTFsYkNoYWMKdFIwVCtpdzhweFZibzBhcWhTUlhoQ3dUdHdCN3VUTnpCak1UeFVOYzViRHlqZ2NCVkdaaEw5UmZBTk5iWmNUMgovN1FHYU0yVlRHaThNZ2l3aXVpMVR1b1kvRFRqVmxRckF5TlpvR2NjbDh4bGRyQi82ck9tamMyYUhxdmN4Vm1TClk5NHdkVlcvakNXa0dMNnhlUWl6WkxZVm50eHZ3am1tRFVLWmZlemZpV3pVclJ4WEhYakJsejJFSWNwTU5pdzgKZUR3QVdXWVNhMDZ1N1dvQUVDYk1hWG95dlY2N2F0WVRTa2EvNHFnM0lGV2ZaWEVlWitNaXlvckU2a0VvSkVTKwpQazlDTGJMNXVJTzlVVTJqVFF1Lys2TnhwckNTS1V3YkphcnJWZ0VkYjhsUGFmUWx2ZkF3eWVYbUFYc2Mxd0lECkFRQUJvNElGSURDQ0JSd3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01Bd0dBMVVkRXdFQi93UUNNQUF3SHdZRFZSMGoKQkJnd0ZvQVV3UUs2VkRlK2NQM1RIa01OTnhycEl3QlR6RFV3Z2dUWkJnTlZIUkVFZ2dUUU1JSUV6SUlKYkc5agpZV3hvYjNOMGdnMXpiMjFsTFc1aGJXVXRjbk13Z2lwemIyMWxMVzVoYldVdGNuTXdMblJzY3kxcGMzTjFaUzFqClpYSjBMVzFoYm1GblpYSXRNVEU1TmpHQ1BITnZiV1V0Ym1GdFpTMXljekF1ZEd4ekxXbHpjM1ZsTFdObGNuUXQKYldGdVlXZGxjaTB4TVRrMk1TNXpkbU11WTJ4MWMzUmxjaTVzYjJOaGJJSVBLaTV6YjIxbExXNWhiV1V0Y25NdwpnaXdxTG5OdmJXVXRibUZ0WlMxeWN6QXVkR3h6TFdsemMzVmxMV05sY25RdGJXRnVZV2RsY2kweE1UazJNWUkrCktpNXpiMjFsTFc1aGJXVXRjbk13TG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVEU1TmpFdWMzWmoKTG1Oc2RYTjBaWEl1Ykc5allXeUNQM052YldVdGJtRnRaUzF5Y3pBdWRHeHpMV2x6YzNWbExXTmxjblF0YldGdQpZV2RsY2kweE1UazJNUzV6ZG1NdVkyeDFjM1JsY25ObGRDNXNiMk5oYklKQktpNXpiMjFsTFc1aGJXVXRjbk13CkxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TVRFNU5qRXVjM1pqTG1Oc2RYTjBaWEp6WlhRdWJHOWoKWVd5Q015b3VkR3h6TFdsemMzVmxMV05sY25RdGJXRnVZV2RsY2kweE1UazJNUzV6ZG1NdVkyeDFjM1JsY25ObApkQzVzYjJOaGJJSVFjMjl0WlMxdVlXMWxMVzF2Ym1kdmM0SXRjMjl0WlMxdVlXMWxMVzF2Ym1kdmN5NTBiSE10CmFYTnpkV1V0WTJWeWRDMXRZVzVoWjJWeUxURXhPVFl4Z2o5emIyMWxMVzVoYldVdGJXOXVaMjl6TG5Sc2N5MXAKYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TVRFNU5qRXVjM1pqTG1Oc2RYTjBaWEl1Ykc5allXeUNFaW91YzI5dApaUzF1WVcxbExXMXZibWR2YzRJdktpNXpiMjFsTFc1aGJXVXRiVzl1WjI5ekxuUnNjeTFwYzNOMVpTMWpaWEowCkxXMWhibUZuWlhJdE1URTVOakdDUVNvdWMyOXRaUzF1WVcxbExXMXZibWR2Y3k1MGJITXRhWE56ZFdVdFkyVnkKZEMxdFlXNWhaMlZ5TFRFeE9UWXhMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNnZzF6YjIxbExXNWhiV1V0WTJabgpnaXB6YjIxbExXNWhiV1V0WTJabkxuUnNjeTFwYzNOMVpTMWpaWEowTFcxaGJtRm5aWEl0TVRFNU5qR0NQSE52CmJXVXRibUZ0WlMxalptY3VkR3h6TFdsemMzVmxMV05sY25RdGJXRnVZV2RsY2kweE1UazJNUzV6ZG1NdVkyeDEKYzNSbGNpNXNiMk5oYklJUEtpNXpiMjFsTFc1aGJXVXRZMlpuZ2l3cUxuTnZiV1V0Ym1GdFpTMWpabWN1ZEd4egpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TVRrMk1ZSStLaTV6YjIxbExXNWhiV1V0WTJabkxuUnNjeTFwCmMzTjFaUzFqWlhKMExXMWhibUZuWlhJdE1URTVOakV1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDUW5OdmJXVXQKYm1GdFpTMXRiMjVuYjNNdWRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TVRrMk1TNXpkbU11WTJ4MQpjM1JsY25ObGRDNXNiMk5oYklKRUtpNXpiMjFsTFc1aGJXVXRiVzl1WjI5ekxuUnNjeTFwYzNOMVpTMWpaWEowCkxXMWhibUZuWlhJdE1URTVOakV1YzNaakxtTnNkWE4wWlhKelpYUXViRzlqWVd5Q1AzTnZiV1V0Ym1GdFpTMWoKWm1jdWRHeHpMV2x6YzNWbExXTmxjblF0YldGdVlXZGxjaTB4TVRrMk1TNXpkbU11WTJ4MWMzUmxjbk5sZEM1cwpiMk5oYklKQktpNXpiMjFsTFc1aGJXVXRZMlpuTG5Sc2N5MXBjM04xWlMxalpYSjBMVzFoYm1GblpYSXRNVEU1Ck5qRXVjM1pqTG1Oc2RYTjBaWEp6WlhRdWJHOWpZV3d3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUdxRURtMFoKcm9NbVVMVm5LT3MrYzRjbWJUdmRJTUprR0dRV0hpT2FtUlkvcjRzU3JyUUdPMGdFYmxOWFNvSG9QcEZZY3l0NwpVdUp
oSXJHcUt5Rmplc0pzWWkzVTVJVFBCbjBZdG9ucGlqbHhIOXN2K0g2emtodkEwM2M3TktzNXR6bUZJT212CmE2bTFIRk5ZaHdUeXR4UWU4SFZWZzM2T3lDQktiNFJXN01hUGNNOEsvV0hJSU5vNmg4Yk5YMHhVWUx5Nk5RRUwKdVlMc0JlaDZCcHJmcXlrLzR6ZEFQWDk5bGRTMTJsN2Ird2t3YiswSUFTQ0NubXBYTm9yM2IwQ1pOVCtETWNaOApIckVIZDNDUGZxTEpPNWErODNUWktBcTc2T1R0bWp1Z3FSTWpFMVg2QUVEbDREc2hwUDR2a3Vua2gxU0NOdnBOCktBRTBoa3FSbDRUeWRsUT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="' ']' + check_secret_data_key some-name-ssl tls.key + local secret_name=some-name-ssl + local data_key=tls.key + local secret_data ++ kubectl_bin get secrets/some-name-ssl -o json ++ jq '.data["tls.key"]' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9czbJdridF +++ mktemp ++ local LAST_ERR=/tmp/tmp.pxzj4NTP9T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/some-name-ssl -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9czbJdridF ++ cat /tmp/tmp.pxzj4NTP9T ++ rm /tmp/tmp.9czbJdridF /tmp/tmp.pxzj4NTP9T ++ return 0 + secret_data='"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdXlpV2hlSzhBelE1Mmo1elExbGJDaGFjdFIwVCtpdzhweFZibzBhcWhTUlhoQ3dUCnR3Qjd1VE56QmpNVHhVTmM1YkR5amdjQlZHWmhMOVJmQU5OYlpjVDIvN1FHYU0yVlRHaThNZ2l3aXVpMVR1b1kKL0RUalZsUXJBeU5ab0djY2w4eGxkckIvNnJPbWpjMmFIcXZjeFZtU1k5NHdkVlcvakNXa0dMNnhlUWl6WkxZVgpudHh2d2ptbURVS1pmZXpmaVd6VXJSeFhIWGpCbHoyRUljcE1OaXc4ZUR3QVdXWVNhMDZ1N1dvQUVDYk1hWG95CnZWNjdhdFlUU2thLzRxZzNJRldmWlhFZVorTWl5b3JFNmtFb0pFUytQazlDTGJMNXVJTzlVVTJqVFF1Lys2TngKcHJDU0tVd2JKYXJyVmdFZGI4bFBhZlFsdmZBd3llWG1BWHNjMXdJREFRQUJBb0lCQUJQbm15cWVGSlRhcHhSNAo4OVZEMlpUWEZzK0VTaTJmS3N3eVQwNnNJMUtKck8wWHF2a1ppZzhHVThOakIwVkJoczIrNWUwSS9iRTJPanZXCkFFeWNMZXBDNm1vRGdYTC8rbExvQ0t2aGxMVVJUc2NYeXBSNEkzMTQ4aUloVGxUcTRuV2FmV2dFR1hhaXFLSUkKTGMzNVJUK1A2U3ZKNDZsMjl5bzJqcDg3R0xEZ2QzU1NqS2M4MEJHNEpEMTNIQkRva1g5QVJ3eDhIZ2U4VDNXbwp3WlVjNGszMnRqOGhYOUhtUzZrMHgwZEcwTnJZNGlDbU5QbjdYcXV5ckVhOUF5KzlGWWJxUlNRLzQ5cmVITTZ2ClNZS3BRaUJGWEp5YkVxNHplUWYwM1R2Q0hrZ3h5cXNjN0pQMGdWc3d4aUhtV0tNU1crSFpvRDNJanAwaFNoS2gKM0FuVlE4RUNnWUVBMnpUSkt6cHBaYWlKa255UHJyekdXbk4xNEU4QnV1amNQZEU3c0x0ZUd4WFpRTzRpUE1PRApGai8zV1ZzWE1lUXdmQm9rV3BzQnA4NnF5dHJUc2NCb0dWT3VZcEpNN0dzQWE1blJuTUVYMEM5eEUxMGlvZnhXCk40NmZKVUtiRnFJdks1VjhaeUVBWEt6M3czckZ4cTV5cEs5UzRXWXFnQTlDWFhuVXR0TGQ2NUVDZ1lFQTJwSzUKMFVjVi9ubDRCNXNuWFZrSm90ZkJHVnk5SnpGZDRvajB0R2ZmNkwxRUVCMEhxSlJweThkc0xhNlo2RlhFNmhMdwoybzBvaDhmbWh3cjhSdU9VeEVFcUl4aHlraDI0N2h0OWsxWmlHUEpyNmEvbFV4YkQyUWVxWHkybG5wUmhnYUpVClRXanRORCtBY1NIejBLUHJwNGxqTGpPbkc0VWJBMGlUc2FkNlBlY0NnWUF5UWk0RmZVVUJDOTFPK3NRdXVoVjQKVTVTcE16UWdXSmQyak94MG51RDZzTTI0OTJCZU1pMXlBb1NCdGtMaEs0SjZVYndZRWdZVkpXcy9TMkRTRmwxSAp3VW53Uzc3YlRjVXc1bm1LeVRMZWhxSVVZSHc2UzVLdzlrQkMyTHlhT2VFNnl2RlU2OW9iWllVQkNyc2h6SWw0CmZNSXg3Nnk2NDhUQmpkZFplNHRxSVFLQmdGU0tyaTdoWlFOeDdsaGF6ZVlRVzJiT054VmRScGlUUXJJdHNTRGEKa01QUCtUU1dWZkVCdnM0Z1M1cDd6M0lMUXNnYkhXZ2JjMUxab2pRWHJxNm90bEVXdDNxdHFSc1ZQZ056UktZcApvblh0eG9tMERHcVFBcDYrbFVxTk9HTlhGMFB5Q3RPS1F6MlRWbFhzQVFkeGIrUDBneFZLeTFjY3ErOGdNbWhqCjFDZC9Bb0dBR1VQR0luWHVMbHZjays5b1NQMWQzSFdOR3hrZHFxTEN2QTVMaDA3TkNpNFN4bFVyS3hQczQ3MysKdko4KzRNKy91NTZBdFJMRzVEZE1yeW9ZOWszRjYwYkx0Vkh4Rk1EVWd4UEl4dW5jVjltNnVsaEw5bVVUeHpiSwpJRHhjNDZtWGI5SlBVYUlwRmhreUlQTk5HV1dXZEpVNThIMkQ3aEhJb1dnbmhQanhHTzg9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="' + '[' -z 
'"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdXlpV2hlSzhBelE1Mmo1elExbGJDaGFjdFIwVCtpdzhweFZibzBhcWhTUlhoQ3dUCnR3Qjd1VE56QmpNVHhVTmM1YkR5amdjQlZHWmhMOVJmQU5OYlpjVDIvN1FHYU0yVlRHaThNZ2l3aXVpMVR1b1kKL0RUalZsUXJBeU5ab0djY2w4eGxkckIvNnJPbWpjMmFIcXZjeFZtU1k5NHdkVlcvakNXa0dMNnhlUWl6WkxZVgpudHh2d2ptbURVS1pmZXpmaVd6VXJSeFhIWGpCbHoyRUljcE1OaXc4ZUR3QVdXWVNhMDZ1N1dvQUVDYk1hWG95CnZWNjdhdFlUU2thLzRxZzNJRldmWlhFZVorTWl5b3JFNmtFb0pFUytQazlDTGJMNXVJTzlVVTJqVFF1Lys2TngKcHJDU0tVd2JKYXJyVmdFZGI4bFBhZlFsdmZBd3llWG1BWHNjMXdJREFRQUJBb0lCQUJQbm15cWVGSlRhcHhSNAo4OVZEMlpUWEZzK0VTaTJmS3N3eVQwNnNJMUtKck8wWHF2a1ppZzhHVThOakIwVkJoczIrNWUwSS9iRTJPanZXCkFFeWNMZXBDNm1vRGdYTC8rbExvQ0t2aGxMVVJUc2NYeXBSNEkzMTQ4aUloVGxUcTRuV2FmV2dFR1hhaXFLSUkKTGMzNVJUK1A2U3ZKNDZsMjl5bzJqcDg3R0xEZ2QzU1NqS2M4MEJHNEpEMTNIQkRva1g5QVJ3eDhIZ2U4VDNXbwp3WlVjNGszMnRqOGhYOUhtUzZrMHgwZEcwTnJZNGlDbU5QbjdYcXV5ckVhOUF5KzlGWWJxUlNRLzQ5cmVITTZ2ClNZS3BRaUJGWEp5YkVxNHplUWYwM1R2Q0hrZ3h5cXNjN0pQMGdWc3d4aUhtV0tNU1crSFpvRDNJanAwaFNoS2gKM0FuVlE4RUNnWUVBMnpUSkt6cHBaYWlKa255UHJyekdXbk4xNEU4QnV1amNQZEU3c0x0ZUd4WFpRTzRpUE1PRApGai8zV1ZzWE1lUXdmQm9rV3BzQnA4NnF5dHJUc2NCb0dWT3VZcEpNN0dzQWE1blJuTUVYMEM5eEUxMGlvZnhXCk40NmZKVUtiRnFJdks1VjhaeUVBWEt6M3czckZ4cTV5cEs5UzRXWXFnQTlDWFhuVXR0TGQ2NUVDZ1lFQTJwSzUKMFVjVi9ubDRCNXNuWFZrSm90ZkJHVnk5SnpGZDRvajB0R2ZmNkwxRUVCMEhxSlJweThkc0xhNlo2RlhFNmhMdwoybzBvaDhmbWh3cjhSdU9VeEVFcUl4aHlraDI0N2h0OWsxWmlHUEpyNmEvbFV4YkQyUWVxWHkybG5wUmhnYUpVClRXanRORCtBY1NIejBLUHJwNGxqTGpPbkc0VWJBMGlUc2FkNlBlY0NnWUF5UWk0RmZVVUJDOTFPK3NRdXVoVjQKVTVTcE16UWdXSmQyak94MG51RDZzTTI0OTJCZU1pMXlBb1NCdGtMaEs0SjZVYndZRWdZVkpXcy9TMkRTRmwxSAp3VW53Uzc3YlRjVXc1bm1LeVRMZWhxSVVZSHc2UzVLdzlrQkMyTHlhT2VFNnl2RlU2OW9iWllVQkNyc2h6SWw0CmZNSXg3Nnk2NDhUQmpkZFplNHRxSVFLQmdGU0tyaTdoWlFOeDdsaGF6ZVlRVzJiT054VmRScGlUUXJJdHNTRGEKa01QUCtUU1dWZkVCdnM0Z1M1cDd6M0lMUXNnYkhXZ2JjMUxab2pRWHJxNm90bEVXdDNxdHFSc1ZQZ056UktZcApvblh0eG9tMERHcVFBcDYrbFVxTk9HTlhGMFB5Q3RPS1F6MlRWbFhzQVFkeGIrUDBneFZLeTFjY3ErOGdNbWhqCjFDZC9Bb0dBR1VQR0luWHVMbHZjays5b1NQMWQzSFdOR3hrZHFxTEN2QTVMaDA3TkNpNFN4bFVyS3hQczQ3MysKdko4KzRNKy91NTZBdFJMRzVEZE1yeW9ZOWszRjYwYkx0Vkh4Rk1EVWd4UEl4dW5jVjltNnVsaEw5bVVUeHpiSwpJRHhjNDZtWGI5SlBVYUlwRmhreUlQTk5HV1dXZEpVNThIMkQ3aEhJb1dnbmhQanhHTzg9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="' ']' + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. 
| select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer ++ mktemp + local LAST_OUT=/tmp/tmp.kQHWorfUSd ++ mktemp + local LAST_ERR=/tmp/tmp.2cSSGu4Uf7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kQHWorfUSd + cat /tmp/tmp.2cSSGu4Uf7 + rm /tmp/tmp.kQHWorfUSd /tmp/tmp.2cSSGu4Uf7 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.VCxxT8y2lK ++ mktemp + local LAST_ERR=/tmp/tmp.0Zw8TxKQPe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VCxxT8y2lK + cat /tmp/tmp.0Zw8TxKQPe + rm /tmp/tmp.VCxxT8y2lK /tmp/tmp.0Zw8TxKQPe + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml certificate/some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.MpAPEJGKRG ++ mktemp + local LAST_ERR=/tmp/tmp.uxM1m7zhwO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MpAPEJGKRG + cat /tmp/tmp.uxM1m7zhwO + rm /tmp/tmp.MpAPEJGKRG /tmp/tmp.uxM1m7zhwO + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local new_result=/tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.6TSrMA3yJZ ++ mktemp + local LAST_ERR=/tmp/tmp.Vud9mU3cvg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6TSrMA3yJZ + cat /tmp/tmp.Vud9mU3cvg + rm /tmp/tmp.6TSrMA3yJZ /tmp/tmp.Vud9mU3cvg + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready 
certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gZD5CfelVm +++ mktemp ++ local LAST_ERR=/tmp/tmp.5VpYv2YQQq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gZD5CfelVm ++ cat /tmp/tmp.5VpYv2YQQq ++ rm /tmp/tmp.gZD5CfelVm /tmp/tmp.5VpYv2YQQq ++ return 0 + pod_name=cmctl-f8bd55686-vk79t + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8FY7GgPg4c +++ mktemp ++ local LAST_ERR=/tmp/tmp.QarAoMbAiw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8FY7GgPg4c ++ cat /tmp/tmp.QarAoMbAiw ++ rm /tmp/tmp.8FY7GgPg4c /tmp/tmp.QarAoMbAiw ++ return 0 + revision=1 + kubectl_bin exec cmctl-f8bd55686-vk79t -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.j5iUq8AdQV ++ mktemp + local LAST_ERR=/tmp/tmp.uoIQMzycyH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec cmctl-f8bd55686-vk79t -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j5iUq8AdQV Manually triggered issuance of Certificate tls-issue-cert-manager-11961/some-name-ssl + cat /tmp/tmp.uoIQMzycyH + rm /tmp/tmp.j5iUq8AdQV /tmp/tmp.uoIQMzycyH + return 0 + for i in '{1..10}' + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.32nam2lekX +++ mktemp ++ local LAST_ERR=/tmp/tmp.h8VizLT6Xe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.32nam2lekX ++ cat /tmp/tmp.h8VizLT6Xe ++ rm /tmp/tmp.32nam2lekX /tmp/tmp.h8VizLT6Xe ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 
$last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x95T8kPzi4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.A3QMf33GIl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x95T8kPzi4 ++ cat /tmp/tmp.A3QMf33GIl ++ rm /tmp/tmp.x95T8kPzi4 /tmp/tmp.A3QMf33GIl ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DzueGZx9q8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FxbsLDRUwv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DzueGZx9q8 ++ cat /tmp/tmp.FxbsLDRUwv ++ rm /tmp/tmp.DzueGZx9q8 /tmp/tmp.FxbsLDRUwv ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................................................................................................................................... 
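The trace above interleaves two reusable patterns. compare_kubectl normalizes a live object (kubectl get -o yaml piped through the long yq del() chain, which drops uid, resourceVersion, timestamps, managedFields and status, and rewrites the test namespace to NAME_SPACE) and then runs diff -u against a checked-in expectation, so any non-empty diff fails the step. renew_certificate records the certificate's .status.revision, triggers a manual issuance through the cmctl pod, and polls until cert-manager increments the revision. A minimal sketch of that renew-and-verify step, assuming (as in this run) a cmctl deployment labeled name=cmctl with the binary at /tmp/cmctl; the function name and retry bounds here are illustrative, not the suite's exact helper:

    # Trigger a manual renewal and wait for cert-manager to bump .status.revision.
    renew_and_verify() {
        local cert="$1"
        local pod revision new_revision
        # The suite runs cmctl from a helper pod rather than the CI host.
        pod="$(kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}')"
        revision="$(kubectl get certificate "${cert}" -o 'jsonpath={.status.revision}')"
        kubectl exec "${pod}" -- /tmp/cmctl renew "${cert}"
        for _ in {1..10}; do
            new_revision="$(kubectl get certificate "${cert}" -o 'jsonpath={.status.revision}')"
            # A successful renewal shows up as exactly revision+1.
            if [[ "${new_revision}" == "$((revision + 1))" ]]; then
                return 0
            fi
            sleep 1
        done
        echo "certificate ${cert} was not renewed" >&2
        return 1
    }

In the run above this is the step that takes some-name-ssl from revision 1 to 2; the same flow repeats below for some-name-ssl-internal, each time followed by wait_for_running so the operator can roll the renewed certificate through the rs0, cfg and mongos pods.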
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KVLsa5DMBp +++ mktemp ++ local LAST_ERR=/tmp/tmp.0crhbZVfTV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KVLsa5DMBp ++ cat /tmp/tmp.0crhbZVfTV ++ rm /tmp/tmp.KVLsa5DMBp /tmp/tmp.0crhbZVfTV ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I1r12TFvs3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sJ4uvXV9hy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I1r12TFvs3 ++ cat /tmp/tmp.sJ4uvXV9hy ++ rm /tmp/tmp.I1r12TFvs3 /tmp/tmp.sJ4uvXV9hy ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7NzsBKSoSt +++ mktemp ++ local LAST_ERR=/tmp/tmp.JwKci5lboL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7NzsBKSoSt ++ cat /tmp/tmp.JwKci5lboL ++ rm /tmp/tmp.7NzsBKSoSt /tmp/tmp.JwKci5lboL ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KShAaRwkIA +++ mktemp ++ local LAST_ERR=/tmp/tmp.yLpRre6lgy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KShAaRwkIA ++ cat /tmp/tmp.yLpRre6lgy ++ rm /tmp/tmp.KShAaRwkIA /tmp/tmp.yLpRre6lgy ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in '{1..10}' + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5hqFSurp00 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BwvpB05oMt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5hqFSurp00 ++ cat /tmp/tmp.BwvpB05oMt ++ rm /tmp/tmp.5hqFSurp00 /tmp/tmp.BwvpB05oMt ++ return 0 + pod_name=cmctl-f8bd55686-vk79t + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tQJOXEv1a7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tZAv7JiGxI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 
'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tQJOXEv1a7 ++ cat /tmp/tmp.tZAv7JiGxI ++ rm /tmp/tmp.tQJOXEv1a7 /tmp/tmp.tZAv7JiGxI ++ return 0 + revision=1 + kubectl_bin exec cmctl-f8bd55686-vk79t -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.lGW5bt2cts ++ mktemp + local LAST_ERR=/tmp/tmp.srfEBOe3Kc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec cmctl-f8bd55686-vk79t -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lGW5bt2cts Manually triggered issuance of Certificate tls-issue-cert-manager-11961/some-name-ssl-internal + cat /tmp/tmp.srfEBOe3Kc + rm /tmp/tmp.lGW5bt2cts /tmp/tmp.srfEBOe3Kc + return 0 + for i in '{1..10}' + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yL2uGJ1yFr +++ mktemp ++ local LAST_ERR=/tmp/tmp.pVqsJ2xlJZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yL2uGJ1yFr ++ cat /tmp/tmp.pVqsJ2xlJZ ++ rm /tmp/tmp.yL2uGJ1yFr /tmp/tmp.pVqsJ2xlJZ ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 10 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SWInpy6Ox9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.hlhJrbuOKr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SWInpy6Ox9 ++ cat /tmp/tmp.hlhJrbuOKr ++ rm /tmp/tmp.SWInpy6Ox9 /tmp/tmp.hlhJrbuOKr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fXEr8KqHBu +++ mktemp ++ local LAST_ERR=/tmp/tmp.b8tO4v9WaW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fXEr8KqHBu ++ cat /tmp/tmp.b8tO4v9WaW ++ rm /tmp/tmp.fXEr8KqHBu /tmp/tmp.b8tO4v9WaW ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster 
readyness.................................................................................................................................................................................................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p3IUEPYtmr +++ mktemp ++ local LAST_ERR=/tmp/tmp.5s0c9Il5wj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p3IUEPYtmr ++ cat /tmp/tmp.5s0c9Il5wj ++ rm /tmp/tmp.p3IUEPYtmr /tmp/tmp.5s0c9Il5wj ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gFYbZ1s7mX +++ mktemp ++ local LAST_ERR=/tmp/tmp.hsBFfrOuQv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gFYbZ1s7mX ++ cat /tmp/tmp.hsBFfrOuQv ++ rm /tmp/tmp.gFYbZ1s7mX /tmp/tmp.hsBFfrOuQv ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yxrGHqsZIr +++ mktemp ++ local LAST_ERR=/tmp/tmp.SH806LJwQG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yxrGHqsZIr ++ cat /tmp/tmp.SH806LJwQG ++ rm /tmp/tmp.yxrGHqsZIr /tmp/tmp.SH806LJwQG ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3dnclaZo4x +++ mktemp ++ local LAST_ERR=/tmp/tmp.KgIYM3sZ7I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3dnclaZo4x ++ cat /tmp/tmp.KgIYM3sZ7I ++ rm /tmp/tmp.3dnclaZo4x /tmp/tmp.KgIYM3sZ7I ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'check if CA issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if CA issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-ca-issuer + local resource=issuer/some-name-psmdb-ca-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml + local new_result=/tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-ca-issuer ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.VoI3JAIPeL ++ mktemp + local LAST_ERR=/tmp/tmp.yhzUseNn6i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-ca-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VoI3JAIPeL + cat /tmp/tmp.yhzUseNn6i + rm /tmp/tmp.VoI3JAIPeL /tmp/tmp.yhzUseNn6i + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-ca-issuer.yml + desc 'check if issuer created' + set +o xtrace ----------------------------------------------------------------------------------- check if issuer created ----------------------------------------------------------------------------------- + compare_kubectl issuer/some-name-psmdb-issuer + local resource=issuer/some-name-psmdb-issuer + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml + local new_result=/tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer-oc.yml ']' + kubectl_bin get -o yaml issuer/some-name-psmdb-issuer + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1eCx5qfKWI ++ mktemp + local LAST_ERR=/tmp/tmp.nalb01sxOO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml issuer/some-name-psmdb-issuer + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1eCx5qfKWI + cat /tmp/tmp.nalb01sxOO + rm /tmp/tmp.1eCx5qfKWI /tmp/tmp.nalb01sxOO + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml /tmp/tmp.1McPIVQius/issuer_some-name-psmdb-issuer.yml + desc 'check if certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl + local resource=certificate/some-name-ssl + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml + local new_result=/tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.WQfYjCTZO1 ++ mktemp + local LAST_ERR=/tmp/tmp.pyoDXkrQ9s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WQfYjCTZO1 + cat /tmp/tmp.pyoDXkrQ9s + rm /tmp/tmp.WQfYjCTZO1 /tmp/tmp.pyoDXkrQ9s + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml /tmp/tmp.1McPIVQius/certificate_some-name-ssl.yml + desc 'check if internal certificate issued' + set +o xtrace ----------------------------------------------------------------------------------- check if internal certificate issued ----------------------------------------------------------------------------------- + compare_kubectl certificate/some-name-ssl-internal + local resource=certificate/some-name-ssl-internal + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml + local new_result=/tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal-oc.yml ']' + kubectl_bin get -o yaml certificate/some-name-ssl-internal ++ mktemp + yq eval ' 
del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.jXNzmk1jjY ++ mktemp + local LAST_ERR=/tmp/tmp.oJwK8aYiar + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml certificate/some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jXNzmk1jjY + cat /tmp/tmp.oJwK8aYiar + rm /tmp/tmp.jXNzmk1jjY /tmp/tmp.oJwK8aYiar + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml /tmp/tmp.1McPIVQius/certificate_some-name-ssl-internal.yml + desc 'disable TLS' + set +o xtrace ----------------------------------------------------------------------------------- disable TLS ----------------------------------------------------------------------------------- + pause_cluster some-name + local cluster_name=some-name + echo 'Pausing cluster some-name' Pausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' ++ mktemp + local LAST_OUT=/tmp/tmp.yGBEV0rnv3 ++ mktemp + local LAST_ERR=/tmp/tmp.LXB3TxSS3o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": true } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yGBEV0rnv3 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.LXB3TxSS3o + rm /tmp/tmp.yGBEV0rnv3 /tmp/tmp.LXB3TxSS3o + return 0 + wait_for_cluster_state some-name paused + local cluster_name=some-name + local target_state=paused + echo -n 'Waiting for cluster to reach paused state' Waiting for cluster to reach paused state+ local timeout=0 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2FyCTMCGCo +++ mktemp ++ local LAST_ERR=/tmp/tmp.7jI7ketT38 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2FyCTMCGCo ++ cat /tmp/tmp.7jI7ketT38 ++ rm /tmp/tmp.2FyCTMCGCo /tmp/tmp.7jI7ketT38 ++ return 0 + [[ ready == paused ]] + sleep 1 + timeout=1 + echo -n . .+ [[ 1 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6A3Z57zlZU +++ mktemp ++ local LAST_ERR=/tmp/tmp.dhPF9fDAET ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6A3Z57zlZU ++ cat /tmp/tmp.dhPF9fDAET ++ rm /tmp/tmp.6A3Z57zlZU /tmp/tmp.dhPF9fDAET ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=2 + echo -n . 
.+ [[ 2 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TlFiJfEXWz +++ mktemp ++ local LAST_ERR=/tmp/tmp.DKMSkrZdvk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TlFiJfEXWz ++ cat /tmp/tmp.DKMSkrZdvk ++ rm /tmp/tmp.TlFiJfEXWz /tmp/tmp.DKMSkrZdvk ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=3 + echo -n . .+ [[ 3 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BTl368OeFx +++ mktemp ++ local LAST_ERR=/tmp/tmp.KxbMZk3YYP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BTl368OeFx ++ cat /tmp/tmp.KxbMZk3YYP ++ rm /tmp/tmp.BTl368OeFx /tmp/tmp.KxbMZk3YYP ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=4 + echo -n . .+ [[ 4 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BJCoZxvLzz +++ mktemp ++ local LAST_ERR=/tmp/tmp.0gRh5I6x6M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BJCoZxvLzz ++ cat /tmp/tmp.0gRh5I6x6M ++ rm /tmp/tmp.BJCoZxvLzz /tmp/tmp.0gRh5I6x6M ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=5 + echo -n . .+ [[ 5 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jwsbir5kC4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FUcNJHEFd0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jwsbir5kC4 ++ cat /tmp/tmp.FUcNJHEFd0 ++ rm /tmp/tmp.jwsbir5kC4 /tmp/tmp.FUcNJHEFd0 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=6 + echo -n . .+ [[ 6 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wyrppO5RVe +++ mktemp ++ local LAST_ERR=/tmp/tmp.qUBYn96FM3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wyrppO5RVe ++ cat /tmp/tmp.qUBYn96FM3 ++ rm /tmp/tmp.wyrppO5RVe /tmp/tmp.qUBYn96FM3 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=7 + echo -n . .+ [[ 7 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LDLm5wK9bX +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cc7PDD9kW5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LDLm5wK9bX ++ cat /tmp/tmp.Cc7PDD9kW5 ++ rm /tmp/tmp.LDLm5wK9bX /tmp/tmp.Cc7PDD9kW5 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=8 + echo -n . 
.+ [[ 8 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EtmKBsciJW +++ mktemp ++ local LAST_ERR=/tmp/tmp.uAXj31HNYY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EtmKBsciJW ++ cat /tmp/tmp.uAXj31HNYY ++ rm /tmp/tmp.EtmKBsciJW /tmp/tmp.uAXj31HNYY ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=9 + echo -n . .+ [[ 9 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.doFzlvuWAO +++ mktemp ++ local LAST_ERR=/tmp/tmp.g4VJvAuBRY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.doFzlvuWAO ++ cat /tmp/tmp.g4VJvAuBRY ++ rm /tmp/tmp.doFzlvuWAO /tmp/tmp.g4VJvAuBRY ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=10 + echo -n . .+ [[ 10 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5KPydIweOp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Skh7gHjbTR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5KPydIweOp ++ cat /tmp/tmp.Skh7gHjbTR ++ rm /tmp/tmp.5KPydIweOp /tmp/tmp.Skh7gHjbTR ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=11 + echo -n . .+ [[ 11 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RsprbbIF5J +++ mktemp ++ local LAST_ERR=/tmp/tmp.oyvidgn1Ve ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RsprbbIF5J ++ cat /tmp/tmp.oyvidgn1Ve ++ rm /tmp/tmp.RsprbbIF5J /tmp/tmp.oyvidgn1Ve ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=12 + echo -n . .+ [[ 12 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v3k2peDIzx +++ mktemp ++ local LAST_ERR=/tmp/tmp.gUDHFRpuWa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v3k2peDIzx ++ cat /tmp/tmp.gUDHFRpuWa ++ rm /tmp/tmp.v3k2peDIzx /tmp/tmp.gUDHFRpuWa ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=13 + echo -n . .+ [[ 13 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GNIW0PsSY7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DbEOyV7xnh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GNIW0PsSY7 ++ cat /tmp/tmp.DbEOyV7xnh ++ rm /tmp/tmp.GNIW0PsSY7 /tmp/tmp.DbEOyV7xnh ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=14 + echo -n . 
.+ [[ 14 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IuxbbZhpXV +++ mktemp ++ local LAST_ERR=/tmp/tmp.cyyy9eMPnk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IuxbbZhpXV ++ cat /tmp/tmp.cyyy9eMPnk ++ rm /tmp/tmp.IuxbbZhpXV /tmp/tmp.cyyy9eMPnk ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=15 + echo -n . .+ [[ 15 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VyQId71ptU +++ mktemp ++ local LAST_ERR=/tmp/tmp.mOC1FTghUd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VyQId71ptU ++ cat /tmp/tmp.mOC1FTghUd ++ rm /tmp/tmp.VyQId71ptU /tmp/tmp.mOC1FTghUd ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=16 + echo -n . .+ [[ 16 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w4NUE0luRo +++ mktemp ++ local LAST_ERR=/tmp/tmp.PpMW51KIM2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w4NUE0luRo ++ cat /tmp/tmp.PpMW51KIM2 ++ rm /tmp/tmp.w4NUE0luRo /tmp/tmp.PpMW51KIM2 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=17 + echo -n . .+ [[ 17 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jsagpR2izk +++ mktemp ++ local LAST_ERR=/tmp/tmp.35revEjn2d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jsagpR2izk ++ cat /tmp/tmp.35revEjn2d ++ rm /tmp/tmp.jsagpR2izk /tmp/tmp.35revEjn2d ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=18 + echo -n . .+ [[ 18 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K5JMpYG0x9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pQbhZJnrmZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K5JMpYG0x9 ++ cat /tmp/tmp.pQbhZJnrmZ ++ rm /tmp/tmp.K5JMpYG0x9 /tmp/tmp.pQbhZJnrmZ ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=19 + echo -n . .+ [[ 19 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dMGwCGhxdz +++ mktemp ++ local LAST_ERR=/tmp/tmp.JOzbPl47nL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dMGwCGhxdz ++ cat /tmp/tmp.JOzbPl47nL ++ rm /tmp/tmp.dMGwCGhxdz /tmp/tmp.JOzbPl47nL ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=20 + echo -n . 
.+ [[ 20 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.deMRwUYRU7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YPoWT5a5b5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.deMRwUYRU7 ++ cat /tmp/tmp.YPoWT5a5b5 ++ rm /tmp/tmp.deMRwUYRU7 /tmp/tmp.YPoWT5a5b5 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=21 + echo -n . .+ [[ 21 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cae6CeL8y4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gO9qVfOk5U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cae6CeL8y4 ++ cat /tmp/tmp.gO9qVfOk5U ++ rm /tmp/tmp.Cae6CeL8y4 /tmp/tmp.gO9qVfOk5U ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=22 + echo -n . .+ [[ 22 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mp0FakgWA4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QC8DS0SYpL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mp0FakgWA4 ++ cat /tmp/tmp.QC8DS0SYpL ++ rm /tmp/tmp.mp0FakgWA4 /tmp/tmp.QC8DS0SYpL ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=23 + echo -n . .+ [[ 23 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I1a5bh4AdK +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yfe3XCcRJU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I1a5bh4AdK ++ cat /tmp/tmp.Yfe3XCcRJU ++ rm /tmp/tmp.I1a5bh4AdK /tmp/tmp.Yfe3XCcRJU ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=24 + echo -n . .+ [[ 24 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RgWyDe664C +++ mktemp ++ local LAST_ERR=/tmp/tmp.jdicVewPPm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RgWyDe664C ++ cat /tmp/tmp.jdicVewPPm ++ rm /tmp/tmp.RgWyDe664C /tmp/tmp.jdicVewPPm ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=25 + echo -n . .+ [[ 25 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HcBZHps7L9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kVVNuh6kPg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HcBZHps7L9 ++ cat /tmp/tmp.kVVNuh6kPg ++ rm /tmp/tmp.HcBZHps7L9 /tmp/tmp.kVVNuh6kPg ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=26 + echo -n . 
.+ [[ 26 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K65Xz1a82u +++ mktemp ++ local LAST_ERR=/tmp/tmp.xjXfNzhyTX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K65Xz1a82u ++ cat /tmp/tmp.xjXfNzhyTX ++ rm /tmp/tmp.K65Xz1a82u /tmp/tmp.xjXfNzhyTX ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=27 + echo -n . .+ [[ 27 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.khkfkwrnFI +++ mktemp ++ local LAST_ERR=/tmp/tmp.9WdVZ3aacZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.khkfkwrnFI ++ cat /tmp/tmp.9WdVZ3aacZ ++ rm /tmp/tmp.khkfkwrnFI /tmp/tmp.9WdVZ3aacZ ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=28 + echo -n . .+ [[ 28 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wlth78GcV4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.J6lPlggE1U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wlth78GcV4 ++ cat /tmp/tmp.J6lPlggE1U ++ rm /tmp/tmp.wlth78GcV4 /tmp/tmp.J6lPlggE1U ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=29 + echo -n . .+ [[ 29 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yGqy2woOyu +++ mktemp ++ local LAST_ERR=/tmp/tmp.mNliyuzH3a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yGqy2woOyu ++ cat /tmp/tmp.mNliyuzH3a ++ rm /tmp/tmp.yGqy2woOyu /tmp/tmp.mNliyuzH3a ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=30 + echo -n . .+ [[ 30 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.s9xTSaTUKy +++ mktemp ++ local LAST_ERR=/tmp/tmp.rH81F0xhRI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s9xTSaTUKy ++ cat /tmp/tmp.rH81F0xhRI ++ rm /tmp/tmp.s9xTSaTUKy /tmp/tmp.rH81F0xhRI ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=31 + echo -n . .+ [[ 31 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wmrAVmiD4H +++ mktemp ++ local LAST_ERR=/tmp/tmp.lFVCFy0TX0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wmrAVmiD4H ++ cat /tmp/tmp.lFVCFy0TX0 ++ rm /tmp/tmp.wmrAVmiD4H /tmp/tmp.lFVCFy0TX0 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=32 + echo -n . 
.+ [[ 32 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ptlrt0Ep6b +++ mktemp ++ local LAST_ERR=/tmp/tmp.chz4fD1K8A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ptlrt0Ep6b ++ cat /tmp/tmp.chz4fD1K8A ++ rm /tmp/tmp.ptlrt0Ep6b /tmp/tmp.chz4fD1K8A ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=33 + echo -n . .+ [[ 33 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nCONapttQY +++ mktemp ++ local LAST_ERR=/tmp/tmp.z2Bs3QZOu0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nCONapttQY ++ cat /tmp/tmp.z2Bs3QZOu0 ++ rm /tmp/tmp.nCONapttQY /tmp/tmp.z2Bs3QZOu0 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=34 + echo -n . .+ [[ 34 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2Ev1iglH4r +++ mktemp ++ local LAST_ERR=/tmp/tmp.0OA5sD7ld0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2Ev1iglH4r ++ cat /tmp/tmp.0OA5sD7ld0 ++ rm /tmp/tmp.2Ev1iglH4r /tmp/tmp.0OA5sD7ld0 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=35 + echo -n . .+ [[ 35 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8dmUQmQmX1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sFpROo8TGb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8dmUQmQmX1 ++ cat /tmp/tmp.sFpROo8TGb ++ rm /tmp/tmp.8dmUQmQmX1 /tmp/tmp.sFpROo8TGb ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=36 + echo -n . .+ [[ 36 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w0UDaBwuNB +++ mktemp ++ local LAST_ERR=/tmp/tmp.UH6yKzvbTd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w0UDaBwuNB ++ cat /tmp/tmp.UH6yKzvbTd ++ rm /tmp/tmp.w0UDaBwuNB /tmp/tmp.UH6yKzvbTd ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=37 + echo -n . .+ [[ 37 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3H6P3ex5Ku +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ewpqpl6BcA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3H6P3ex5Ku ++ cat /tmp/tmp.Ewpqpl6BcA ++ rm /tmp/tmp.3H6P3ex5Ku /tmp/tmp.Ewpqpl6BcA ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=38 + echo -n . 
.+ [[ 38 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H27MASgScv +++ mktemp ++ local LAST_ERR=/tmp/tmp.KNYEwVdI0X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H27MASgScv ++ cat /tmp/tmp.KNYEwVdI0X ++ rm /tmp/tmp.H27MASgScv /tmp/tmp.KNYEwVdI0X ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=39 + echo -n . .+ [[ 39 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vWODs22bQJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.nMvf5pgwc3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vWODs22bQJ ++ cat /tmp/tmp.nMvf5pgwc3 ++ rm /tmp/tmp.vWODs22bQJ /tmp/tmp.nMvf5pgwc3 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=40 + echo -n . .+ [[ 40 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oogCAgGxwa +++ mktemp ++ local LAST_ERR=/tmp/tmp.8cI2HuR1qJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oogCAgGxwa ++ cat /tmp/tmp.8cI2HuR1qJ ++ rm /tmp/tmp.oogCAgGxwa /tmp/tmp.8cI2HuR1qJ ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=41 + echo -n . .+ [[ 41 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.COKjTFbiZ1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FF8PsY5II7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.COKjTFbiZ1 ++ cat /tmp/tmp.FF8PsY5II7 ++ rm /tmp/tmp.COKjTFbiZ1 /tmp/tmp.FF8PsY5II7 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=42 + echo -n . .+ [[ 42 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qUoTsbppE6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8E2JOGkqDJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qUoTsbppE6 ++ cat /tmp/tmp.8E2JOGkqDJ ++ rm /tmp/tmp.qUoTsbppE6 /tmp/tmp.8E2JOGkqDJ ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=43 + echo -n . .+ [[ 43 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iisCX62hq1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8eTINFt8yP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iisCX62hq1 ++ cat /tmp/tmp.8eTINFt8yP ++ rm /tmp/tmp.iisCX62hq1 /tmp/tmp.8eTINFt8yP ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=44 + echo -n . 
.+ [[ 44 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NkMjUZ6LkR +++ mktemp ++ local LAST_ERR=/tmp/tmp.GxSLfFPXJe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NkMjUZ6LkR ++ cat /tmp/tmp.GxSLfFPXJe ++ rm /tmp/tmp.NkMjUZ6LkR /tmp/tmp.GxSLfFPXJe ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=45 + echo -n . .+ [[ 45 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1m5an190Fi +++ mktemp ++ local LAST_ERR=/tmp/tmp.VN9agPcu4m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1m5an190Fi ++ cat /tmp/tmp.VN9agPcu4m ++ rm /tmp/tmp.1m5an190Fi /tmp/tmp.VN9agPcu4m ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=46 + echo -n . .+ [[ 46 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k9YdePTLv3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.60dcfSngv9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k9YdePTLv3 ++ cat /tmp/tmp.60dcfSngv9 ++ rm /tmp/tmp.k9YdePTLv3 /tmp/tmp.60dcfSngv9 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=47 + echo -n . .+ [[ 47 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dAmtZYi0J6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gjWDpc1yFm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dAmtZYi0J6 ++ cat /tmp/tmp.gjWDpc1yFm ++ rm /tmp/tmp.dAmtZYi0J6 /tmp/tmp.gjWDpc1yFm ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=48 + echo -n . .+ [[ 48 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v6joLT7Ut3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wEc25TuGOa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v6joLT7Ut3 ++ cat /tmp/tmp.wEc25TuGOa ++ rm /tmp/tmp.v6joLT7Ut3 /tmp/tmp.wEc25TuGOa ++ return 0 + [[ error == paused ]] + sleep 1 + timeout=49 + echo -n . .+ [[ 49 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tl8CmR5YPo +++ mktemp ++ local LAST_ERR=/tmp/tmp.L8Bhm15eES ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Tl8CmR5YPo ++ cat /tmp/tmp.L8Bhm15eES ++ rm /tmp/tmp.Tl8CmR5YPo /tmp/tmp.L8Bhm15eES ++ return 0 + [[ error == paused ]] + sleep 1 + timeout=50 + echo -n . 
.+ [[ 50 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3wINO6oqkH +++ mktemp ++ local LAST_ERR=/tmp/tmp.ev6WfVgGlP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3wINO6oqkH ++ cat /tmp/tmp.ev6WfVgGlP ++ rm /tmp/tmp.3wINO6oqkH /tmp/tmp.ev6WfVgGlP ++ return 0 + [[ error == paused ]] + sleep 1 + timeout=51 + echo -n . .+ [[ 51 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gfFdecx9Ye +++ mktemp ++ local LAST_ERR=/tmp/tmp.fQcbz2zu2K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gfFdecx9Ye ++ cat /tmp/tmp.fQcbz2zu2K ++ rm /tmp/tmp.gfFdecx9Ye /tmp/tmp.fQcbz2zu2K ++ return 0 + [[ error == paused ]] + sleep 1 + timeout=52 + echo -n . .+ [[ 52 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4OSZwGott1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LB1X5XdvKK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4OSZwGott1 ++ cat /tmp/tmp.LB1X5XdvKK ++ rm /tmp/tmp.4OSZwGott1 /tmp/tmp.LB1X5XdvKK ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=53 + echo -n . .+ [[ 53 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.huX5ZtQTMY +++ mktemp ++ local LAST_ERR=/tmp/tmp.lKHgCHYQIK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.huX5ZtQTMY ++ cat /tmp/tmp.lKHgCHYQIK ++ rm /tmp/tmp.huX5ZtQTMY /tmp/tmp.lKHgCHYQIK ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=54 + echo -n . .+ [[ 54 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g9OeUoOqvb +++ mktemp ++ local LAST_ERR=/tmp/tmp.7goMVt0yoM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g9OeUoOqvb ++ cat /tmp/tmp.7goMVt0yoM ++ rm /tmp/tmp.g9OeUoOqvb /tmp/tmp.7goMVt0yoM ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=55 + echo -n . .+ [[ 55 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Yx8inSGjQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hs2OHs2166 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0Yx8inSGjQ ++ cat /tmp/tmp.Hs2OHs2166 ++ rm /tmp/tmp.0Yx8inSGjQ /tmp/tmp.Hs2OHs2166 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=56 + echo -n . 
.+ [[ 56 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yvBh0TcPTH +++ mktemp ++ local LAST_ERR=/tmp/tmp.FCVRCYsZYW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yvBh0TcPTH ++ cat /tmp/tmp.FCVRCYsZYW ++ rm /tmp/tmp.yvBh0TcPTH /tmp/tmp.FCVRCYsZYW ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=57 + echo -n . .+ [[ 57 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CRdHkgZ9hw +++ mktemp ++ local LAST_ERR=/tmp/tmp.u6in9UN414 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CRdHkgZ9hw ++ cat /tmp/tmp.u6in9UN414 ++ rm /tmp/tmp.CRdHkgZ9hw /tmp/tmp.u6in9UN414 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=58 + echo -n . .+ [[ 58 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N9MODXLJmF +++ mktemp ++ local LAST_ERR=/tmp/tmp.k3Qalg7Obc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N9MODXLJmF ++ cat /tmp/tmp.k3Qalg7Obc ++ rm /tmp/tmp.N9MODXLJmF /tmp/tmp.k3Qalg7Obc ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=59 + echo -n . .+ [[ 59 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sdw554ab3v +++ mktemp ++ local LAST_ERR=/tmp/tmp.NlS2cObSK7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sdw554ab3v ++ cat /tmp/tmp.NlS2cObSK7 ++ rm /tmp/tmp.sdw554ab3v /tmp/tmp.NlS2cObSK7 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=60 + echo -n . .+ [[ 60 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mbssutawPS +++ mktemp ++ local LAST_ERR=/tmp/tmp.WpgYiN0xTc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mbssutawPS ++ cat /tmp/tmp.WpgYiN0xTc ++ rm /tmp/tmp.mbssutawPS /tmp/tmp.WpgYiN0xTc ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=61 + echo -n . .+ [[ 61 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NCR1CNrreE +++ mktemp ++ local LAST_ERR=/tmp/tmp.d8jJFnCAm8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NCR1CNrreE ++ cat /tmp/tmp.d8jJFnCAm8 ++ rm /tmp/tmp.NCR1CNrreE /tmp/tmp.d8jJFnCAm8 ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=62 + echo -n . 
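
The dotted iterations above (and the ready-wait further down) are expansions of the polling helper invoked as wait_for_cluster_state; each pass re-reads .status.state through the retry wrapper, sleeps one second, and aborts after 1500 attempts. A minimal sketch implied by the trace (the timeout failure branch is an assumption, since no timeout is hit in this run):

    wait_for_cluster_state() {
        local cluster_name=$1
        local target_state=$2
        local timeout=0
        echo -n "Waiting for cluster to reach ${target_state} state"
        until [[ $(kubectl_bin get psmdb "${cluster_name}" -o 'jsonpath={.status.state}') == "${target_state}" ]]; do
            sleep 1
            timeout=$((timeout + 1))
            echo -n .
            if [[ ${timeout} -gt 1500 ]]; then
                echo "cluster ${cluster_name} never reached ${target_state} state"  # assumed message
                exit 1
            fi
        done
        echo
    }
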
.+ [[ 62 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vXHAmvgnz9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.P3ZkLTj9tu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vXHAmvgnz9 ++ cat /tmp/tmp.P3ZkLTj9tu ++ rm /tmp/tmp.vXHAmvgnz9 /tmp/tmp.P3ZkLTj9tu ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=63 + echo -n . .+ [[ 63 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eqpZiZBuYC +++ mktemp ++ local LAST_ERR=/tmp/tmp.pe1j0Is1Go ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eqpZiZBuYC ++ cat /tmp/tmp.pe1j0Is1Go ++ rm /tmp/tmp.eqpZiZBuYC /tmp/tmp.pe1j0Is1Go ++ return 0 + [[ stopping == paused ]] + sleep 1 + timeout=64 + echo -n . .+ [[ 64 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YzGWHYCoET +++ mktemp ++ local LAST_ERR=/tmp/tmp.C4bGWUl3dF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YzGWHYCoET ++ cat /tmp/tmp.C4bGWUl3dF ++ rm /tmp/tmp.YzGWHYCoET /tmp/tmp.C4bGWUl3dF ++ return 0 + [[ paused == paused ]] + echo + disable_tls some-name + local cluster_name=some-name + echo 'Disabling TLS for cluster some-name' Disabling TLS for cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' ++ mktemp + local LAST_OUT=/tmp/tmp.3ru674fLyQ ++ mktemp + local LAST_ERR=/tmp/tmp.aUoCoKQoYm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3ru674fLyQ perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.aUoCoKQoYm + rm /tmp/tmp.3ru674fLyQ /tmp/tmp.aUoCoKQoYm + return 0 + unpause_cluster some-name + local cluster_name=some-name + echo 'Unpausing cluster some-name' Unpausing cluster some-name + kubectl_bin patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' ++ mktemp + local LAST_OUT=/tmp/tmp.R7osczHLI2 ++ mktemp + local LAST_ERR=/tmp/tmp.93Wyk5AppT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb some-name --type merge '-p={"spec": { "pause": false } }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R7osczHLI2 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.93Wyk5AppT + rm /tmp/tmp.R7osczHLI2 /tmp/tmp.93Wyk5AppT + return 0 + wait_for_cluster_state some-name ready + local cluster_name=some-name + local target_state=ready + echo -n 'Waiting for cluster to reach ready state' Waiting for cluster to reach ready state+ local timeout=0 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n3yQe7f6c0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3lbXT6ARE1 ++ 
local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n3yQe7f6c0 ++ cat /tmp/tmp.3lbXT6ARE1 ++ rm /tmp/tmp.n3yQe7f6c0 /tmp/tmp.3lbXT6ARE1 ++ return 0 + [[ paused == ready ]] + sleep 1 + timeout=1 + echo -n . .+ [[ 1 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MTLysm2uyk +++ mktemp ++ local LAST_ERR=/tmp/tmp.mhzhdlqrm7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MTLysm2uyk ++ cat /tmp/tmp.mhzhdlqrm7 ++ rm /tmp/tmp.MTLysm2uyk /tmp/tmp.mhzhdlqrm7 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=2 + echo -n . .+ [[ 2 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O7jlp40r9A +++ mktemp ++ local LAST_ERR=/tmp/tmp.byaru2LMOX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O7jlp40r9A ++ cat /tmp/tmp.byaru2LMOX ++ rm /tmp/tmp.O7jlp40r9A /tmp/tmp.byaru2LMOX ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=3 + echo -n . .+ [[ 3 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2hZ0LC3oen +++ mktemp ++ local LAST_ERR=/tmp/tmp.CqHRjtTN9G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2hZ0LC3oen ++ cat /tmp/tmp.CqHRjtTN9G ++ rm /tmp/tmp.2hZ0LC3oen /tmp/tmp.CqHRjtTN9G ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=4 + echo -n . .+ [[ 4 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K2Egreylpi +++ mktemp ++ local LAST_ERR=/tmp/tmp.LMdydh4AMg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K2Egreylpi ++ cat /tmp/tmp.LMdydh4AMg ++ rm /tmp/tmp.K2Egreylpi /tmp/tmp.LMdydh4AMg ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=5 + echo -n . .+ [[ 5 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Oj599ro4up +++ mktemp ++ local LAST_ERR=/tmp/tmp.N6H3guldKQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Oj599ro4up ++ cat /tmp/tmp.N6H3guldKQ ++ rm /tmp/tmp.Oj599ro4up /tmp/tmp.N6H3guldKQ ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=6 + echo -n . 
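
Stripped of the wrapper noise, the two state changes driven a few lines above reduce to a pair of merge patches against the psmdb custom resource (commands copied from the trace; tls.mode=disabled is an unsafe configuration, so the test also sets the unsafeFlags.tls escape hatch):

    # disable TLS on the paused cluster
    kubectl patch psmdb some-name --type merge \
        -p '{"spec": { "unsafeFlags": { "tls": true }, "tls": { "mode": "disabled" } } }'
    # resume reconciliation
    kubectl patch psmdb some-name --type merge \
        -p '{"spec": { "pause": false } }'
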
.+ [[ 6 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NPxanM5RMP +++ mktemp ++ local LAST_ERR=/tmp/tmp.PUC8nuHJ6Q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NPxanM5RMP ++ cat /tmp/tmp.PUC8nuHJ6Q ++ rm /tmp/tmp.NPxanM5RMP /tmp/tmp.PUC8nuHJ6Q ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=7 + echo -n . .+ [[ 7 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tuUhSsUtzI +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jiex2yoPuM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tuUhSsUtzI ++ cat /tmp/tmp.Jiex2yoPuM ++ rm /tmp/tmp.tuUhSsUtzI /tmp/tmp.Jiex2yoPuM ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=8 + echo -n . .+ [[ 8 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UNGjDG9zSl +++ mktemp ++ local LAST_ERR=/tmp/tmp.7gD3VBn63z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UNGjDG9zSl ++ cat /tmp/tmp.7gD3VBn63z ++ rm /tmp/tmp.UNGjDG9zSl /tmp/tmp.7gD3VBn63z ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=9 + echo -n . .+ [[ 9 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QIuve0blMg +++ mktemp ++ local LAST_ERR=/tmp/tmp.VobVQGa4VD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QIuve0blMg ++ cat /tmp/tmp.VobVQGa4VD ++ rm /tmp/tmp.QIuve0blMg /tmp/tmp.VobVQGa4VD ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=10 + echo -n . .+ [[ 10 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.odXZYaPST6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IpW19YePdQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.odXZYaPST6 ++ cat /tmp/tmp.IpW19YePdQ ++ rm /tmp/tmp.odXZYaPST6 /tmp/tmp.IpW19YePdQ ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=11 + echo -n . .+ [[ 11 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2ydlg4uihw +++ mktemp ++ local LAST_ERR=/tmp/tmp.0ld4HhMy9u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2ydlg4uihw ++ cat /tmp/tmp.0ld4HhMy9u ++ rm /tmp/tmp.2ydlg4uihw /tmp/tmp.0ld4HhMy9u ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=12 + echo -n . 
.+ [[ 12 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.icNkdNXm7i +++ mktemp ++ local LAST_ERR=/tmp/tmp.7HwV5rw2Xj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.icNkdNXm7i ++ cat /tmp/tmp.7HwV5rw2Xj ++ rm /tmp/tmp.icNkdNXm7i /tmp/tmp.7HwV5rw2Xj ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=13 + echo -n . .+ [[ 13 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BO3vEpwMNJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.tMLTkpW92g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BO3vEpwMNJ ++ cat /tmp/tmp.tMLTkpW92g ++ rm /tmp/tmp.BO3vEpwMNJ /tmp/tmp.tMLTkpW92g ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=14 + echo -n . .+ [[ 14 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tIriHmCa5J +++ mktemp ++ local LAST_ERR=/tmp/tmp.Kdw21s0fwU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tIriHmCa5J ++ cat /tmp/tmp.Kdw21s0fwU ++ rm /tmp/tmp.tIriHmCa5J /tmp/tmp.Kdw21s0fwU ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=15 + echo -n . .+ [[ 15 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TgbiGtYPBj +++ mktemp ++ local LAST_ERR=/tmp/tmp.KNgZeaENtR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TgbiGtYPBj ++ cat /tmp/tmp.KNgZeaENtR ++ rm /tmp/tmp.TgbiGtYPBj /tmp/tmp.KNgZeaENtR ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=16 + echo -n . .+ [[ 16 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L6HZVc44Ik +++ mktemp ++ local LAST_ERR=/tmp/tmp.wA7cB9ODQM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L6HZVc44Ik ++ cat /tmp/tmp.wA7cB9ODQM ++ rm /tmp/tmp.L6HZVc44Ik /tmp/tmp.wA7cB9ODQM ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=17 + echo -n . .+ [[ 17 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BcEIVHpfB8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iYoqQMFPGz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BcEIVHpfB8 ++ cat /tmp/tmp.iYoqQMFPGz ++ rm /tmp/tmp.BcEIVHpfB8 /tmp/tmp.iYoqQMFPGz ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=18 + echo -n . 
.+ [[ 18 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mnJ9MtcaNT +++ mktemp ++ local LAST_ERR=/tmp/tmp.NOQrxvbK8W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mnJ9MtcaNT ++ cat /tmp/tmp.NOQrxvbK8W ++ rm /tmp/tmp.mnJ9MtcaNT /tmp/tmp.NOQrxvbK8W ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=19 + echo -n . .+ [[ 19 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E1QE8Y7OiS +++ mktemp ++ local LAST_ERR=/tmp/tmp.E7k14zMEMG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E1QE8Y7OiS ++ cat /tmp/tmp.E7k14zMEMG ++ rm /tmp/tmp.E1QE8Y7OiS /tmp/tmp.E7k14zMEMG ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=20 + echo -n . .+ [[ 20 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wKgypW5mvL +++ mktemp ++ local LAST_ERR=/tmp/tmp.W4MepRnUgI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wKgypW5mvL ++ cat /tmp/tmp.W4MepRnUgI ++ rm /tmp/tmp.wKgypW5mvL /tmp/tmp.W4MepRnUgI ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=21 + echo -n . .+ [[ 21 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IjZ5BLtzpC +++ mktemp ++ local LAST_ERR=/tmp/tmp.WZpKq5s2qU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IjZ5BLtzpC ++ cat /tmp/tmp.WZpKq5s2qU ++ rm /tmp/tmp.IjZ5BLtzpC /tmp/tmp.WZpKq5s2qU ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=22 + echo -n . .+ [[ 22 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HP91PnI3RZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.F0FaNXFS7W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HP91PnI3RZ ++ cat /tmp/tmp.F0FaNXFS7W ++ rm /tmp/tmp.HP91PnI3RZ /tmp/tmp.F0FaNXFS7W ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=23 + echo -n . .+ [[ 23 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UzSyTdkTOR +++ mktemp ++ local LAST_ERR=/tmp/tmp.tEdr8utvRO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UzSyTdkTOR ++ cat /tmp/tmp.tEdr8utvRO ++ rm /tmp/tmp.UzSyTdkTOR /tmp/tmp.tEdr8utvRO ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=24 + echo -n . 
.+ [[ 24 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w7k2Q4j6xz +++ mktemp ++ local LAST_ERR=/tmp/tmp.3laHElV7oO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w7k2Q4j6xz ++ cat /tmp/tmp.3laHElV7oO ++ rm /tmp/tmp.w7k2Q4j6xz /tmp/tmp.3laHElV7oO ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=25 + echo -n . .+ [[ 25 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.om0DR65agK +++ mktemp ++ local LAST_ERR=/tmp/tmp.dvlDNe9IU7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.om0DR65agK ++ cat /tmp/tmp.dvlDNe9IU7 ++ rm /tmp/tmp.om0DR65agK /tmp/tmp.dvlDNe9IU7 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=26 + echo -n . .+ [[ 26 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u483WOXyoD +++ mktemp ++ local LAST_ERR=/tmp/tmp.4PDFlOscJ0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u483WOXyoD ++ cat /tmp/tmp.4PDFlOscJ0 ++ rm /tmp/tmp.u483WOXyoD /tmp/tmp.4PDFlOscJ0 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=27 + echo -n . .+ [[ 27 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q2B2mSGOzQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.8KdrPCFkyV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q2B2mSGOzQ ++ cat /tmp/tmp.8KdrPCFkyV ++ rm /tmp/tmp.q2B2mSGOzQ /tmp/tmp.8KdrPCFkyV ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=28 + echo -n . .+ [[ 28 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f5hZdUYBq3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.37TV3ZKIRa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f5hZdUYBq3 ++ cat /tmp/tmp.37TV3ZKIRa ++ rm /tmp/tmp.f5hZdUYBq3 /tmp/tmp.37TV3ZKIRa ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=29 + echo -n . .+ [[ 29 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.98bFOvpEQm +++ mktemp ++ local LAST_ERR=/tmp/tmp.DQj6zLpmyN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.98bFOvpEQm ++ cat /tmp/tmp.DQj6zLpmyN ++ rm /tmp/tmp.98bFOvpEQm /tmp/tmp.DQj6zLpmyN ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=30 + echo -n . 
.+ [[ 30 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.naSNGBTjXj +++ mktemp ++ local LAST_ERR=/tmp/tmp.yYYf5Zv6AW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.naSNGBTjXj ++ cat /tmp/tmp.yYYf5Zv6AW ++ rm /tmp/tmp.naSNGBTjXj /tmp/tmp.yYYf5Zv6AW ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=31 + echo -n . .+ [[ 31 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Fg2R6L6QH +++ mktemp ++ local LAST_ERR=/tmp/tmp.8KVYg4rOXV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0Fg2R6L6QH ++ cat /tmp/tmp.8KVYg4rOXV ++ rm /tmp/tmp.0Fg2R6L6QH /tmp/tmp.8KVYg4rOXV ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=32 + echo -n . .+ [[ 32 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pSuhjGbiOG +++ mktemp ++ local LAST_ERR=/tmp/tmp.1nwxYtt6XY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pSuhjGbiOG ++ cat /tmp/tmp.1nwxYtt6XY ++ rm /tmp/tmp.pSuhjGbiOG /tmp/tmp.1nwxYtt6XY ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=33 + echo -n . .+ [[ 33 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TqNLJIGYhp +++ mktemp ++ local LAST_ERR=/tmp/tmp.dIk54UFJD8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TqNLJIGYhp ++ cat /tmp/tmp.dIk54UFJD8 ++ rm /tmp/tmp.TqNLJIGYhp /tmp/tmp.dIk54UFJD8 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=34 + echo -n . .+ [[ 34 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eHAyxA8BQ1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jtV9YBthw0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eHAyxA8BQ1 ++ cat /tmp/tmp.jtV9YBthw0 ++ rm /tmp/tmp.eHAyxA8BQ1 /tmp/tmp.jtV9YBthw0 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=35 + echo -n . .+ [[ 35 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UYML3lZyPN +++ mktemp ++ local LAST_ERR=/tmp/tmp.6nvarLXTqJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UYML3lZyPN ++ cat /tmp/tmp.6nvarLXTqJ ++ rm /tmp/tmp.UYML3lZyPN /tmp/tmp.6nvarLXTqJ ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=36 + echo -n . 
.+ [[ 36 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lN7vqqlC3Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y5OpO4YLTt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lN7vqqlC3Y ++ cat /tmp/tmp.Y5OpO4YLTt ++ rm /tmp/tmp.lN7vqqlC3Y /tmp/tmp.Y5OpO4YLTt ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=37 + echo -n . .+ [[ 37 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tsQxityfJy +++ mktemp ++ local LAST_ERR=/tmp/tmp.hd53xEoD38 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tsQxityfJy ++ cat /tmp/tmp.hd53xEoD38 ++ rm /tmp/tmp.tsQxityfJy /tmp/tmp.hd53xEoD38 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=38 + echo -n . .+ [[ 38 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.URd0kBNOmi +++ mktemp ++ local LAST_ERR=/tmp/tmp.EXV0cuEIJ8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.URd0kBNOmi ++ cat /tmp/tmp.EXV0cuEIJ8 ++ rm /tmp/tmp.URd0kBNOmi /tmp/tmp.EXV0cuEIJ8 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=39 + echo -n . .+ [[ 39 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GB7cQdEzzm +++ mktemp ++ local LAST_ERR=/tmp/tmp.5JiDWKAMmN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GB7cQdEzzm ++ cat /tmp/tmp.5JiDWKAMmN ++ rm /tmp/tmp.GB7cQdEzzm /tmp/tmp.5JiDWKAMmN ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=40 + echo -n . .+ [[ 40 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LrV8oQWHSu +++ mktemp ++ local LAST_ERR=/tmp/tmp.6LzbHBy6Uh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LrV8oQWHSu ++ cat /tmp/tmp.6LzbHBy6Uh ++ rm /tmp/tmp.LrV8oQWHSu /tmp/tmp.6LzbHBy6Uh ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=41 + echo -n . .+ [[ 41 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VpDMOmoZKZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.jZrSsusKsa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VpDMOmoZKZ ++ cat /tmp/tmp.jZrSsusKsa ++ rm /tmp/tmp.VpDMOmoZKZ /tmp/tmp.jZrSsusKsa ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=42 + echo -n . 
.+ [[ 42 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2lCV4uAmta +++ mktemp ++ local LAST_ERR=/tmp/tmp.k0qEGPjNJT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2lCV4uAmta ++ cat /tmp/tmp.k0qEGPjNJT ++ rm /tmp/tmp.2lCV4uAmta /tmp/tmp.k0qEGPjNJT ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=43 + echo -n . .+ [[ 43 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZsakflsuqL +++ mktemp ++ local LAST_ERR=/tmp/tmp.mBWmCmt63z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZsakflsuqL ++ cat /tmp/tmp.mBWmCmt63z ++ rm /tmp/tmp.ZsakflsuqL /tmp/tmp.mBWmCmt63z ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=44 + echo -n . .+ [[ 44 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cb8MMwSyYV +++ mktemp ++ local LAST_ERR=/tmp/tmp.xIJSIhY3oM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cb8MMwSyYV ++ cat /tmp/tmp.xIJSIhY3oM ++ rm /tmp/tmp.Cb8MMwSyYV /tmp/tmp.xIJSIhY3oM ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=45 + echo -n . .+ [[ 45 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YcMzOjMbja +++ mktemp ++ local LAST_ERR=/tmp/tmp.DLegJJfX5R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YcMzOjMbja ++ cat /tmp/tmp.DLegJJfX5R ++ rm /tmp/tmp.YcMzOjMbja /tmp/tmp.DLegJJfX5R ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=46 + echo -n . .+ [[ 46 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gwx1ypST08 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SuUCgORH6O ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gwx1ypST08 ++ cat /tmp/tmp.SuUCgORH6O ++ rm /tmp/tmp.Gwx1ypST08 /tmp/tmp.SuUCgORH6O ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=47 + echo -n . .+ [[ 47 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5FLiN6QjLy +++ mktemp ++ local LAST_ERR=/tmp/tmp.RWTdTrP87Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5FLiN6QjLy ++ cat /tmp/tmp.RWTdTrP87Y ++ rm /tmp/tmp.5FLiN6QjLy /tmp/tmp.RWTdTrP87Y ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=48 + echo -n . 
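
Once the cluster reports ready (visible in the expansion below), each StatefulSet is compared against a stored expectation: compare_kubectl dumps the live object, strips volatile and version-dependent fields with yq, and diffs the result. A condensed sketch under assumed helper variables (test_dir, tmp_dir), with the trace's long filter abbreviated to a few representative clauses:

    compare_kubectl() {
        local resource=$1
        local postfix=$2
        # statefulset/some-name-rs0 -> statefulset_some-name-rs0.yml (name mangling assumed)
        local expected_result="${test_dir}/compare/${resource//\//_}${postfix}.yml"
        local new_result="${tmp_dir}/${resource//\//_}.yml"
        kubectl_bin get -o yaml "${resource}" \
            | yq eval 'del(.metadata.managedFields)
                | del(.metadata.resourceVersion)
                | del(.. | select(has("image")).image)
                | del(.status)' - >"${new_result}"
        # fields added by newer Kubernetes; the trace gates these on version_gt,
        # which compares versions via bc ('1.27 >= 1.22')
        yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' "${new_result}"
        yq -i eval 'del(.spec.internalTrafficPolicy)' "${new_result}"
        yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' "${new_result}"
        diff -u "${expected_result}" "${new_result}"
    }
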
.+ [[ 48 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DYnCY03pr7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2eCid3EAgN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DYnCY03pr7 ++ cat /tmp/tmp.2eCid3EAgN ++ rm /tmp/tmp.DYnCY03pr7 /tmp/tmp.2eCid3EAgN ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=49 + echo -n . .+ [[ 49 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mfe5LodM1Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.c7bBDFDM3N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mfe5LodM1Y ++ cat /tmp/tmp.c7bBDFDM3N ++ rm /tmp/tmp.mfe5LodM1Y /tmp/tmp.c7bBDFDM3N ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=50 + echo -n . .+ [[ 50 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.blQaXmfZ7I +++ mktemp ++ local LAST_ERR=/tmp/tmp.cuNPD3zVp5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.blQaXmfZ7I ++ cat /tmp/tmp.cuNPD3zVp5 ++ rm /tmp/tmp.blQaXmfZ7I /tmp/tmp.cuNPD3zVp5 ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=51 + echo -n . .+ [[ 51 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZrX8celxiO +++ mktemp ++ local LAST_ERR=/tmp/tmp.RB48dfVJpZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZrX8celxiO ++ cat /tmp/tmp.RB48dfVJpZ ++ rm /tmp/tmp.ZrX8celxiO /tmp/tmp.RB48dfVJpZ ++ return 0 + [[ initializing == ready ]] + sleep 1 + timeout=52 + echo -n . .+ [[ 52 -gt 1500 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eko3H9FVkE +++ mktemp ++ local LAST_ERR=/tmp/tmp.J6qNgMVr50 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eko3H9FVkE ++ cat /tmp/tmp.J6qNgMVr50 ++ rm /tmp/tmp.eko3H9FVkE /tmp/tmp.J6qNgMVr50 ++ return 0 + [[ ready == ready ]] + echo + compare_kubectl statefulset/some-name-rs0 -tls-disabled + local resource=statefulset/some-name-rs0 + local postfix=-tls-disabled + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml + local new_result=/tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.tF8h5VfvK9 ++ mktemp + local LAST_ERR=/tmp/tmp.c7k0rJAWfm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tF8h5VfvK9 + cat /tmp/tmp.c7k0rJAWfm + rm /tmp/tmp.tF8h5VfvK9 /tmp/tmp.c7k0rJAWfm + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-rs0-tls-disabled.yml /tmp/tmp.1McPIVQius/statefulset_some-name-rs0.yml + compare_kubectl statefulset/some-name-cfg -tls-disabled + local resource=statefulset/some-name-cfg + local postfix=-tls-disabled + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml + local new_result=/tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. 
| select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.AINBgFcU7q ++ mktemp + local LAST_ERR=/tmp/tmp.pmGLlL3ihd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AINBgFcU7q + cat /tmp/tmp.pmGLlL3ihd + rm /tmp/tmp.AINBgFcU7q /tmp/tmp.pmGLlL3ihd + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-cfg-tls-disabled.yml /tmp/tmp.1McPIVQius/statefulset_some-name-cfg.yml + compare_kubectl statefulset/some-name-mongos -tls-disabled + local resource=statefulset/some-name-mongos + local postfix=-tls-disabled + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml + local new_result=/tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("tls-issue-cert-manager-11961", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.PLVD8IHnF3 ++ mktemp + local LAST_ERR=/tmp/tmp.mThtEUUwgT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PLVD8IHnF3 + cat /tmp/tmp.mThtEUUwgT + rm /tmp/tmp.PLVD8IHnF3 /tmp/tmp.mThtEUUwgT + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/e2e-tests/tls-issue-cert-manager/compare/statefulset_some-name-mongos-tls-disabled.yml /tmp/tmp.1McPIVQius/statefulset_some-name-mongos.yml + destroy tls-issue-cert-manager-11961 + local namespace=tls-issue-cert-manager-11961 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.hQ9zXjTurD ++ mktemp + local LAST_ERR=/tmp/tmp.4BdBwqIF1G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hQ9zXjTurD customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat 
/tmp/tmp.4BdBwqIF1G
+ rm /tmp/tmp.hQ9zXjTurD /tmp/tmp.4BdBwqIF1G
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.hZdEFNxsP1
++ mktemp
+ local LAST_ERR=/tmp/tmp.UkUXuGA4sj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hZdEFNxsP1
+ cat /tmp/tmp.UkUXuGA4sj
+ rm /tmp/tmp.hZdEFNxsP1 /tmp/tmp.UkUXuGA4sj
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
E0719 20:23:56.573798 31679 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-10-0: the server could not find the requested resource
E0719 20:23:56.574036 31679 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-12-0: the server could not find the requested resource
E0719 20:23:56.574235 31679 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-11-0: the server could not find the requested resource
E0719 20:23:56.677606 31679 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1: the server could not find the requested resource
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.qKhbNRijWu
++ mktemp
+ local LAST_ERR=/tmp/tmp.Oc1ChB2SQa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qKhbNRijWu
+ cat /tmp/tmp.Oc1ChB2SQa
+ rm /tmp/tmp.qKhbNRijWu /tmp/tmp.Oc1ChB2SQa
+ return 0
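Each delete_crd pass follows the same recipe for every CRD named in deploy/crd.yaml: list any leftover custom resources across all namespaces, patch their finalizers away so deletion cannot hang, tolerate "no such resource type" errors, then wait for the CRD itself to disappear. Condensed (a sketch of the pattern traced above; ${src_dir} is the checkout root, as in the trace):

# Per-CRD cleanup: strip finalizers from leftovers, then wait for the CRD
# to go away. Errors from already-removed resource types are swallowed
# with '|| :' so the teardown keeps going, mirroring the '+ :' no-ops above.
for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
    kubectl get "${crd_name}" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch '"${crd_name}"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
        || :
    kubectl wait --for=delete crd "${crd_name}"
done

The odd-looking 'kubectl patch ... -n sh' in the trace is this same command running on empty input: the custom resources are already gone, so the get produces nothing, but xargs (without -r) still invokes sh -c once, and $0 then falls back to the shell's own name, sh.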
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.XDU9JX8Rqi
++ mktemp
+ local LAST_ERR=/tmp/tmp.YSweJ3EvGt
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.XDU9JX8Rqi
+ cat /tmp/tmp.YSweJ3EvGt
+ rm /tmp/tmp.XDU9JX8Rqi /tmp/tmp.YSweJ3EvGt
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.6a1Z6bFDAb
++ mktemp
+ local LAST_ERR=/tmp/tmp.4d0fThPuwp
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1598/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6a1Z6bFDAb
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.4d0fThPuwp
+ rm /tmp/tmp.6a1Z6bFDAb /tmp/tmp.4d0fThPuwp
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.HCE538ZzCA
++ mktemp
+ local LAST_ERR=/tmp/tmp.xrO7DoeYNV
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.HCE538ZzCA
namespace "cert-manager" deleted
customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted
serviceaccount "cert-manager-cainjector" deleted
serviceaccount "cert-manager" deleted
serviceaccount "cert-manager-webhook" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted
clusterrole.rbac.authorization.k8s.io
"cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.xrO7DoeYNV Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.HCE538ZzCA namespace "cert-manager" deleted + cat /tmp/tmp.xrO7DoeYNV Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.HCE538ZzCA + cat /tmp/tmp.xrO7DoeYNV Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io 
"cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.HCE538ZzCA + cat /tmp/tmp.xrO7DoeYNV Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.HCE538ZzCA /tmp/tmp.xrO7DoeYNV + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace tls-issue-cert-manager-11961 + rm -rf /tmp/tmp.1McPIVQius + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.HZILJLV3qN + desc 'test passed' + set +o xtrace + local LAST_OUT=/tmp/tmp.NkTyWSG3Ht ----------------------------------------------------------------------------------- test passed ++ mktemp ----------------------------------------------------------------------------------- ++ mktemp + local LAST_ERR=/tmp/tmp.heeQhXQNxD + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.vRUN1358Ch + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace tls-issue-cert-manager-11961 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator