Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/logs/upgrade-consistency-sharded-tls.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + CLUSTER=some-name + main + create_infra upgrade-consistency-sharded-tls-22729 + local ns=upgrade-consistency-sharded-tls-22729 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.6uTbTVROX2 ++ mktemp + local LAST_ERR=/tmp/tmp.VN8WvyqS9U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6uTbTVROX2 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.VN8WvyqS9U + rm /tmp/tmp.6uTbTVROX2 /tmp/tmp.VN8WvyqS9U + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ltqsyQhi6Q ++ mktemp + local LAST_ERR=/tmp/tmp.WbklbHbZ2d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ltqsyQhi6Q + cat /tmp/tmp.WbklbHbZ2d + rm /tmp/tmp.ltqsyQhi6Q /tmp/tmp.WbklbHbZ2d + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch 
perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.GCZqqkmPQD ++ mktemp + local LAST_ERR=/tmp/tmp.biywbhVezw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GCZqqkmPQD + cat /tmp/tmp.biywbhVezw + rm /tmp/tmp.GCZqqkmPQD /tmp/tmp.biywbhVezw + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n upgrade-consistency-sharded-tls-1104 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0yqLHNECGi ++ mktemp + local LAST_ERR=/tmp/tmp.LGKfOygQM3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0yqLHNECGi customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.LGKfOygQM3 + rm /tmp/tmp.0yqLHNECGi /tmp/tmp.LGKfOygQM3 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.9d6T03W4yx ++ mktemp + local LAST_ERR=/tmp/tmp.bR0zPSFArK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9d6T03W4yx clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.bR0zPSFArK + rm /tmp/tmp.9d6T03W4yx /tmp/tmp.bR0zPSFArK + return 0 + check_crd_for_deletion PR-2044-6293f902 + local git_tag=PR-2044-6293f902 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2044-6293f902/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I5yQ6Wy0B4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NJ9jpiImcD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.I5yQ6Wy0B4 ++ 
cat /tmp/tmp.NJ9jpiImcD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.I5yQ6Wy0B4 ++ cat /tmp/tmp.NJ9jpiImcD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.I5yQ6Wy0B4 ++ cat /tmp/tmp.NJ9jpiImcD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.I5yQ6Wy0B4 ++ cat /tmp/tmp.NJ9jpiImcD Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.I5yQ6Wy0B4 /tmp/tmp.NJ9jpiImcD ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + awk '{print$1}' + xargs kubectl delete ns + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local 
LAST_OUT=/tmp/tmp.1BOLGyQ5IM egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.oJHBJN6TMi ++ mktemp + local LAST_ERR=/tmp/tmp.nyufC2eWP1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.HDfU9H1QEK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1BOLGyQ5IM + cat /tmp/tmp.nyufC2eWP1 + rm /tmp/tmp.1BOLGyQ5IM /tmp/tmp.nyufC2eWP1 + return 0 namespace "cert-manager" deleted namespace "upgrade-consistency-sharded-tls-1104" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oJHBJN6TMi namespace "psmdb-operator" deleted + cat /tmp/tmp.HDfU9H1QEK + rm /tmp/tmp.oJHBJN6TMi /tmp/tmp.HDfU9H1QEK + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.CyJeSJPg2E ++ mktemp + local LAST_ERR=/tmp/tmp.GuDDJX1ycd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CyJeSJPg2E + cat /tmp/tmp.GuDDJX1ycd + rm /tmp/tmp.CyJeSJPg2E /tmp/tmp.GuDDJX1ycd + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.SVqxHSUrdF ++ mktemp + local LAST_ERR=/tmp/tmp.IeW2ZO3pTC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SVqxHSUrdF namespace/psmdb-operator created + cat /tmp/tmp.IeW2ZO3pTC + rm /tmp/tmp.SVqxHSUrdF /tmp/tmp.IeW2ZO3pTC + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.qAqxE7Wos6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IKAgqw8eSs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qAqxE7Wos6 ++ cat /tmp/tmp.IKAgqw8eSs ++ rm /tmp/tmp.qAqxE7Wos6 /tmp/tmp.IKAgqw8eSs ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-6293f902-6-cluster9 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.DQgufh5EAa ++ mktemp + local LAST_ERR=/tmp/tmp.zh4nH4d6Gd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-6293f902-6-cluster9 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DQgufh5EAa Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-6293f902-6-cluster9" modified. 
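The kubectl_bin trace pattern that dominates this log (two mktemp files, a seq 0 2 retry loop, exit_status checks, then cat and rm of the temp files) is the test suite's retry wrapper around kubectl. A minimal sketch of such a wrapper, reconstructed from the trace alone (the function body below is an assumption, not the suite's exact helper; the real one lives in the e2e-tests helpers):

# Hypothetical reconstruction of the kubectl_bin wrapper seen in the trace.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status i
    local timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        # On failure the trace shows the captured output, then sleeps
        # 0s, 4s, 8s between the three attempts.
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        sleep $((timeout * i))
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

Every "+ local LAST_OUT=... + local LAST_ERR=... + for i in $(seq 0 2)" run in the rest of this log is one invocation of this wrapper.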
+ cat /tmp/tmp.zh4nH4d6Gd + rm /tmp/tmp.DQgufh5EAa /tmp/tmp.zh4nH4d6Gd + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2044-6293f902' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2044-6293f902 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.6OHsU88CdO ++ mktemp + local LAST_ERR=/tmp/tmp.uiLmXkHCXX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6OHsU88CdO customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.uiLmXkHCXX + rm /tmp/tmp.6OHsU88CdO /tmp/tmp.uiLmXkHCXX + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.X8326fx0SZ ++ mktemp + local LAST_ERR=/tmp/tmp.TeWcb9iyKJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X8326fx0SZ clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.TeWcb9iyKJ + rm /tmp/tmp.X8326fx0SZ /tmp/tmp.TeWcb9iyKJ + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2044-6293f902") | + kubectl_bin apply -n psmdb-operator -f - ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.MLbs7M7cD8 ++ mktemp + local LAST_ERR=/tmp/tmp.bv7rwead7d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MLbs7M7cD8 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.bv7rwead7d + rm /tmp/tmp.MLbs7M7cD8 /tmp/tmp.bv7rwead7d + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.2S87PQ6B64 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cB1VaBl8RL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2S87PQ6B64 ++ cat /tmp/tmp.cB1VaBl8RL ++ rm /tmp/tmp.2S87PQ6B64 /tmp/tmp.cB1VaBl8RL ++ return 0 + wait_operator_pod percona-server-mongodb-operator-55597d55d4-8964j + local pod=percona-server-mongodb-operator-55597d55d4-8964j + set +o xtrace waiting for pod/percona-server-mongodb-operator-55597d55d4-8964j to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.6smEZ6RGI3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rnECPzpzZD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6smEZ6RGI3 ++ cat /tmp/tmp.rnECPzpzZD ++ rm /tmp/tmp.6smEZ6RGI3 /tmp/tmp.rnECPzpzZD ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-55597d55d4-8964j ++ mktemp + local LAST_OUT=/tmp/tmp.W0zlKH9ah2 ++ mktemp + local LAST_ERR=/tmp/tmp.sbxA0es3E7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-55597d55d4-8964j + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W0zlKH9ah2 + cat /tmp/tmp.sbxA0es3E7 + rm /tmp/tmp.W0zlKH9ah2 /tmp/tmp.sbxA0es3E7 + return 0 2025-11-17T14:56:41.325Z INFO setup Manager starting up {"gitCommit": "6293f90271409106d42d5ed27016494a08db3282", "gitBranch": "PR-2044-6293f902", "buildTime": "", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} + create_namespace upgrade-consistency-sharded-tls-22729 + local namespace=upgrade-consistency-sharded-tls-22729 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print 
$1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces upgrade-consistency-sharded-tls-22729' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-consistency-sharded-tls-22729 ----------------------------------------------------------------------------------- ++ mktemp + kubectl_bin delete namespace upgrade-consistency-sharded-tls-22729 --ignore-not-found ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.gEDu8k6nJp ++ mktemp + local LAST_OUT=/tmp/tmp.ogV3OnNdNH + local LAST_ERR=/tmp/tmp.s7ol7nMB3O + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.rSB2n5PZxA + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace upgrade-consistency-sharded-tls-22729 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gEDu8k6nJp + cat /tmp/tmp.s7ol7nMB3O + rm /tmp/tmp.gEDu8k6nJp /tmp/tmp.s7ol7nMB3O + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ogV3OnNdNH + cat /tmp/tmp.rSB2n5PZxA + rm /tmp/tmp.ogV3OnNdNH /tmp/tmp.rSB2n5PZxA + return 0 + kubectl_bin wait --for=delete namespace upgrade-consistency-sharded-tls-22729 ++ mktemp + local LAST_OUT=/tmp/tmp.rL9qmzdRCi ++ mktemp + local LAST_ERR=/tmp/tmp.sArUN4UPB9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace upgrade-consistency-sharded-tls-22729 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rL9qmzdRCi + cat /tmp/tmp.sArUN4UPB9 + rm /tmp/tmp.rL9qmzdRCi /tmp/tmp.sArUN4UPB9 + return 0 + desc 'create namespace upgrade-consistency-sharded-tls-22729' + set +o xtrace ----------------------------------------------------------------------------------- create namespace 
upgrade-consistency-sharded-tls-22729 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-consistency-sharded-tls-22729 ++ mktemp + local LAST_OUT=/tmp/tmp.UTNxa8wkdE ++ mktemp + local LAST_ERR=/tmp/tmp.aZKnntXojx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace upgrade-consistency-sharded-tls-22729 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UTNxa8wkdE namespace/upgrade-consistency-sharded-tls-22729 created + cat /tmp/tmp.aZKnntXojx + rm /tmp/tmp.UTNxa8wkdE /tmp/tmp.aZKnntXojx + return 0 + set_kube_ctx upgrade-consistency-sharded-tls-22729 + local namespace=upgrade-consistency-sharded-tls-22729 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.QlczZpHKHJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tl279PVFUy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QlczZpHKHJ ++ cat /tmp/tmp.Tl279PVFUy ++ rm /tmp/tmp.QlczZpHKHJ /tmp/tmp.Tl279PVFUy ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-6293f902-6-cluster9 --namespace=upgrade-consistency-sharded-tls-22729 ++ mktemp + local LAST_OUT=/tmp/tmp.97gspWTv8C ++ mktemp + local LAST_ERR=/tmp/tmp.aLMjnLfEjE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-6293f902-6-cluster9 --namespace=upgrade-consistency-sharded-tls-22729 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.97gspWTv8C Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2044-6293f902-6-cluster9" modified. 
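The create_namespace and set_kube_ctx steps just traced reduce to: delete any stale namespace, wait for it to go away, recreate it, and pin the current kube context to it. A condensed sketch under those assumptions (the function name below is hypothetical; the namespace is taken from the trace):

# Hypothetical condensation of the create_namespace + set_kube_ctx flow above.
recreate_namespace() {
    local ns="$1"
    kubectl delete namespace "$ns" --ignore-not-found
    kubectl wait --for=delete namespace "$ns" --timeout=120s || true
    kubectl create namespace "$ns"
    # Pin the active context to the new namespace so later kubectl calls
    # don't need an explicit -n flag.
    kubectl config set-context "$(kubectl config current-context)" --namespace="$ns"
}
recreate_namespace upgrade-consistency-sharded-tls-22729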
+ cat /tmp/tmp.aLMjnLfEjE + rm /tmp/tmp.97gspWTv8C /tmp/tmp.aLMjnLfEjE + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.j6jF1B5TpG ++ mktemp + local LAST_ERR=/tmp/tmp.4FATlH1LLn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j6jF1B5TpG namespace/cert-manager created + cat /tmp/tmp.4FATlH1LLn + rm /tmp/tmp.j6jF1B5TpG /tmp/tmp.4FATlH1LLn + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.aUQTIm3abu ++ mktemp + local LAST_ERR=/tmp/tmp.AYsip21gJ3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aUQTIm3abu namespace/cert-manager labeled + cat /tmp/tmp.AYsip21gJ3 + rm /tmp/tmp.aUQTIm3abu /tmp/tmp.AYsip21gJ3 + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.Hj9ECjA0DX ++ mktemp + local LAST_ERR=/tmp/tmp.OoFo73nPDT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Hj9ECjA0DX namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.OoFo73nPDT Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
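The warning above appears because the cert-manager namespace was created imperatively (kubectl create) and then managed with client-side kubectl apply, which needs the last-applied-configuration annotation. If one wanted to avoid it, server-side apply skips that annotation entirely; the CRD deployment earlier in this log already uses it. A sketch of the alternative invocation (same manifest URL as the trace):

# Server-side apply manages field ownership on the API server, so the
# client-side last-applied-configuration annotation is not required.
kubectl apply --server-side -f \
    https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml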
+ rm /tmp/tmp.Hj9ECjA0DX /tmp/tmp.OoFo73nPDT + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.KEjXVKujC2 ++ mktemp + local LAST_ERR=/tmp/tmp.DMvwZG7iYO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KEjXVKujC2 pod/cert-manager-cainjector-5dc9c8b4f7-ljs4p condition met pod/cert-manager-df4b69479-hnshp condition met pod/cert-manager-webhook-769bbb594d-lxkxq condition met + cat /tmp/tmp.DMvwZG7iYO + rm /tmp/tmp.KEjXVKujC2 /tmp/tmp.DMvwZG7iYO + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.vNj9mmS0HB ++ mktemp + local LAST_ERR=/tmp/tmp.9uk6h1Spvf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vNj9mmS0HB secret/some-users created + cat /tmp/tmp.9uk6h1Spvf + rm /tmp/tmp.vNj9mmS0HB /tmp/tmp.9uk6h1Spvf + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Pv3NjggZqV ++ mktemp + local LAST_ERR=/tmp/tmp.GSd7SHF2PF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Pv3NjggZqV deployment.apps/psmdb-client created + cat /tmp/tmp.GSd7SHF2PF + rm /tmp/tmp.Pv3NjggZqV /tmp/tmp.GSd7SHF2PF + return 0 + deploy_cmctl + local service_account=cmctl + /usr/sbin/sed -e s/percona-server-mongodb-operator/cmctl/g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/deploy/rbac.yaml + yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.9mPfgQlntf ++ mktemp + local LAST_ERR=/tmp/tmp.pkRDbCJSDV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9mPfgQlntf role.rbac.authorization.k8s.io/cmctl created serviceaccount/cmctl created rolebinding.rbac.authorization.k8s.io/service-account-cmctl created + cat /tmp/tmp.pkRDbCJSDV + rm /tmp/tmp.9mPfgQlntf /tmp/tmp.pkRDbCJSDV + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/cmctl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qpjzLbXEe3 ++ mktemp + local LAST_ERR=/tmp/tmp.7Ez9UaBazN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/conf/cmctl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qpjzLbXEe3 deployment.apps/cmctl created + cat /tmp/tmp.7Ez9UaBazN + rm 
/tmp/tmp.qpjzLbXEe3 /tmp/tmp.7Ez9UaBazN + return 0 + desc 'create first PSMDB cluster 1.20.1 some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster 1.20.1 some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2044-6293f902"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.LMKRqclHUJ ++ mktemp + local LAST_ERR=/tmp/tmp.jIvWHfXpvq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LMKRqclHUJ perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.jIvWHfXpvq + rm /tmp/tmp.LMKRqclHUJ /tmp/tmp.jIvWHfXpvq + return 0 + desc 'check if Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod started ----------------------------------------------------------------------------------- + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u9xdlnxNt7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9zxAJPxqJX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u9xdlnxNt7 ++ cat /tmp/tmp.9zxAJPxqJX ++ rm /tmp/tmp.u9xdlnxNt7 /tmp/tmp.9zxAJPxqJX ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready................OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lyRRqodKrE +++ mktemp ++ local LAST_ERR=/tmp/tmp.mCQfRwaMBg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lyRRqodKrE ++ cat /tmp/tmp.mCQfRwaMBg ++ rm /tmp/tmp.lyRRqodKrE /tmp/tmp.mCQfRwaMBg ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iuXsWCY9pm +++ mktemp ++ local LAST_ERR=/tmp/tmp.WyN6Gcc47n ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iuXsWCY9pm ++ cat /tmp/tmp.WyN6Gcc47n ++ rm /tmp/tmp.iuXsWCY9pm /tmp/tmp.WyN6Gcc47n ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................... + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uvBXaASP4N +++ mktemp ++ local LAST_ERR=/tmp/tmp.aHi0V2L2q9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uvBXaASP4N ++ cat /tmp/tmp.aHi0V2L2q9 ++ rm /tmp/tmp.uvBXaASP4N /tmp/tmp.aHi0V2L2q9 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wGsmG3Y0So +++ mktemp ++ local LAST_ERR=/tmp/tmp.DBHEwaEjYW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wGsmG3Y0So ++ cat /tmp/tmp.DBHEwaEjYW ++ rm /tmp/tmp.wGsmG3Y0So /tmp/tmp.DBHEwaEjYW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fx1wWZiuMR +++ mktemp ++ local LAST_ERR=/tmp/tmp.jbrWeRGiLa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fx1wWZiuMR ++ cat /tmp/tmp.jbrWeRGiLa ++ rm /tmp/tmp.Fx1wWZiuMR /tmp/tmp.jbrWeRGiLa ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + 
let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yd9THa4DCt +++ mktemp ++ local LAST_ERR=/tmp/tmp.fI5UJIfi2p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yd9THa4DCt ++ cat /tmp/tmp.fI5UJIfi2p ++ rm /tmp/tmp.yd9THa4DCt /tmp/tmp.fI5UJIfi2p ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.R9OnBsiiwU +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Fg9FoVHeT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.R9OnBsiiwU ++ cat /tmp/tmp.3Fg9FoVHeT ++ rm /tmp/tmp.R9OnBsiiwU /tmp/tmp.3Fg9FoVHeT ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tkoEQZqt7a +++ mktemp ++ local LAST_ERR=/tmp/tmp.Abg7nBFLR9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tkoEQZqt7a ++ cat /tmp/tmp.Abg7nBFLR9 ++ rm /tmp/tmp.tkoEQZqt7a /tmp/tmp.Abg7nBFLR9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gu1odJemH2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QTtqZQA4jN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gu1odJemH2 ++ cat /tmp/tmp.QTtqZQA4jN ++ rm /tmp/tmp.gu1odJemH2 /tmp/tmp.QTtqZQA4jN ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 1 statefulset some-name-rs0 + local generation=1 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vham1kzdTg +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.oz7fPlW4le ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vham1kzdTg ++ cat /tmp/tmp.oz7fPlW4le ++ rm /tmp/tmp.vham1kzdTg /tmp/tmp.oz7fPlW4le ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset some-name-cfg + local generation=1 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Lv70ZZljUE +++ mktemp ++ local LAST_ERR=/tmp/tmp.cw1owtSA6h ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Lv70ZZljUE ++ cat /tmp/tmp.cw1owtSA6h ++ rm /tmp/tmp.Lv70ZZljUE /tmp/tmp.cw1owtSA6h ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + sleep 20 + desc 'check if Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod started ----------------------------------------------------------------------------------- + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GNwaN6xGXe +++ mktemp ++ local LAST_ERR=/tmp/tmp.wSohEH21RH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GNwaN6xGXe ++ cat /tmp/tmp.wSohEH21RH ++ rm /tmp/tmp.GNwaN6xGXe /tmp/tmp.wSohEH21RH ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VHD6lCmpgg +++ mktemp ++ local LAST_ERR=/tmp/tmp.W10gTHZv6p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VHD6lCmpgg ++ cat /tmp/tmp.W10gTHZv6p ++ rm /tmp/tmp.VHD6lCmpgg /tmp/tmp.W10gTHZv6p ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UfrZe5WfoR +++ mktemp ++ local LAST_ERR=/tmp/tmp.5K4GDmT3Sg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set 
+e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UfrZe5WfoR ++ cat /tmp/tmp.5K4GDmT3Sg ++ rm /tmp/tmp.UfrZe5WfoR /tmp/tmp.5K4GDmT3Sg ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hdldiv06MM +++ mktemp ++ local LAST_ERR=/tmp/tmp.ExYHia0XKP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Hdldiv06MM ++ cat /tmp/tmp.ExYHia0XKP ++ rm /tmp/tmp.Hdldiv06MM /tmp/tmp.ExYHia0XKP ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zhemDP5KmT +++ mktemp ++ local LAST_ERR=/tmp/tmp.1sZUPJzAKa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zhemDP5KmT ++ cat /tmp/tmp.1sZUPJzAKa ++ rm /tmp/tmp.zhemDP5KmT /tmp/tmp.1sZUPJzAKa ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6gFUSxxscH +++ mktemp ++ local LAST_ERR=/tmp/tmp.iJUOzauiwP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6gFUSxxscH ++ cat /tmp/tmp.iJUOzauiwP ++ rm /tmp/tmp.6gFUSxxscH /tmp/tmp.iJUOzauiwP ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb 
some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X7ocUL9WxF +++ mktemp ++ local LAST_ERR=/tmp/tmp.jRHkpaxEQj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X7ocUL9WxF ++ cat /tmp/tmp.jRHkpaxEQj ++ rm /tmp/tmp.X7ocUL9WxF /tmp/tmp.jRHkpaxEQj ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y4djLtdfYL +++ mktemp ++ local LAST_ERR=/tmp/tmp.ifDEeuTKlM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y4djLtdfYL ++ cat /tmp/tmp.ifDEeuTKlM ++ rm /tmp/tmp.y4djLtdfYL /tmp/tmp.ifDEeuTKlM ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jBexM9mwif +++ mktemp ++ local LAST_ERR=/tmp/tmp.YxsgYAukaI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jBexM9mwif ++ cat /tmp/tmp.YxsgYAukaI ++ rm /tmp/tmp.jBexM9mwif /tmp/tmp.YxsgYAukaI ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6rqSlfzgvK +++ mktemp ++ local LAST_ERR=/tmp/tmp.NJcWdk31rO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6rqSlfzgvK ++ cat /tmp/tmp.NJcWdk31rO ++ rm /tmp/tmp.6rqSlfzgvK /tmp/tmp.NJcWdk31rO ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s 
certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1icapsz0pk +++ mktemp ++ local LAST_ERR=/tmp/tmp.hmnBFH6dQV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1icapsz0pk ++ cat /tmp/tmp.hmnBFH6dQV ++ rm /tmp/tmp.1icapsz0pk /tmp/tmp.hmnBFH6dQV ++ return 0 + pod_name=cmctl-7c7f6bf77d-rj2r4 + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cRpArTDfCi +++ mktemp ++ local LAST_ERR=/tmp/tmp.nwfjtVWz7Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cRpArTDfCi ++ cat /tmp/tmp.nwfjtVWz7Y ++ rm /tmp/tmp.cRpArTDfCi /tmp/tmp.nwfjtVWz7Y ++ return 0 + revision=1 + kubectl_bin exec cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.AF7lfPZNe4 ++ mktemp + local LAST_ERR=/tmp/tmp.qrnNqUmhFk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AF7lfPZNe4 Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-22729/some-name-ssl + cat /tmp/tmp.qrnNqUmhFk + rm /tmp/tmp.AF7lfPZNe4 /tmp/tmp.qrnNqUmhFk + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O7w9FewapL +++ mktemp ++ local LAST_ERR=/tmp/tmp.yPtVSWIsua ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O7w9FewapL ++ cat /tmp/tmp.yPtVSWIsua ++ rm /tmp/tmp.O7w9FewapL /tmp/tmp.yPtVSWIsua ++ return 0 + new_revision=1 + '[' 2 == 1 ']' + sleep 1 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.JFU4fVEfNi +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZSRFGPPDkc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JFU4fVEfNi ++ cat /tmp/tmp.ZSRFGPPDkc ++ rm /tmp/tmp.JFU4fVEfNi /tmp/tmp.ZSRFGPPDkc ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PRNQSVYsbN +++ mktemp ++ local LAST_ERR=/tmp/tmp.VjWGE7qc1T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PRNQSVYsbN ++ cat /tmp/tmp.VjWGE7qc1T ++ rm /tmp/tmp.PRNQSVYsbN /tmp/tmp.VjWGE7qc1T ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UxpWX2D8nv +++ mktemp ++ local LAST_ERR=/tmp/tmp.8j52iJtekD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UxpWX2D8nv ++ cat /tmp/tmp.8j52iJtekD ++ rm /tmp/tmp.UxpWX2D8nv /tmp/tmp.8j52iJtekD ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9LsT4DT09q +++ mktemp ++ local LAST_ERR=/tmp/tmp.rvUQMgXBTr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9LsT4DT09q ++ cat /tmp/tmp.rvUQMgXBTr ++ rm /tmp/tmp.9LsT4DT09q /tmp/tmp.rvUQMgXBTr ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................................................................................................................................................................ 
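The long dot run above is printed by a poll loop that runs with tracing switched off (set +x), so only its progress dots reach the log: roughly one dot per check until the cluster reports ready. A hypothetical minimal form of that loop — the field polled is presumably the same .status.state that wait_cluster_consistency reads, and the real function also enforces a retry budget:

    echo -n "Waiting for cluster readyness"     # spelling as the suite prints it
    until [[ $(kubectl get psmdb some-name \
        -o 'jsonpath={.status.state}') == "ready" ]]; do
        echo -n .                               # one dot per poll iteration
        sleep 1
    done
    echo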
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TkCiksi7n3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EIyPXXGNiS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TkCiksi7n3 ++ cat /tmp/tmp.EIyPXXGNiS ++ rm /tmp/tmp.TkCiksi7n3 /tmp/tmp.EIyPXXGNiS ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.t7XwM0Go1i +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nxt4MkLnHz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t7XwM0Go1i ++ cat /tmp/tmp.Nxt4MkLnHz ++ rm /tmp/tmp.t7XwM0Go1i /tmp/tmp.Nxt4MkLnHz ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XZkYXMMXvI +++ mktemp ++ local LAST_ERR=/tmp/tmp.jWmqmdmSj8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XZkYXMMXvI ++ cat /tmp/tmp.jWmqmdmSj8 ++ rm /tmp/tmp.XZkYXMMXvI /tmp/tmp.jWmqmdmSj8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mSXeayDeYW +++ mktemp ++ local LAST_ERR=/tmp/tmp.gwgbuF84Ig ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mSXeayDeYW ++ cat /tmp/tmp.gwgbuF84Ig ++ rm /tmp/tmp.mSXeayDeYW /tmp/tmp.gwgbuF84Ig ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vczMEPjoT8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.10IQ156ywE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vczMEPjoT8 ++ cat /tmp/tmp.10IQ156ywE ++ rm /tmp/tmp.vczMEPjoT8 /tmp/tmp.10IQ156ywE ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8jFiACVvhh +++ mktemp ++ local LAST_ERR=/tmp/tmp.caF6A4T17c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8jFiACVvhh ++ cat /tmp/tmp.caF6A4T17c ++ rm /tmp/tmp.8jFiACVvhh /tmp/tmp.caF6A4T17c ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bqz8V0qs7r +++ mktemp ++ local LAST_ERR=/tmp/tmp.v8rvbf4bSp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bqz8V0qs7r ++ cat /tmp/tmp.v8rvbf4bSp ++ rm /tmp/tmp.Bqz8V0qs7r /tmp/tmp.v8rvbf4bSp ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 2 statefulset some-name-rs0 + local generation=2 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b5T9lNfIjN +++ mktemp ++ local LAST_ERR=/tmp/tmp.hovPDCypjp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b5T9lNfIjN ++ cat /tmp/tmp.hovPDCypjp ++ rm /tmp/tmp.b5T9lNfIjN /tmp/tmp.hovPDCypjp ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset some-name-cfg + local generation=2 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1JK9IP79EH +++ mktemp ++ local LAST_ERR=/tmp/tmp.QO5e1dftXh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 
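compare_generation, seen above asserting generation 2 on both statefulsets, checks that the operator performed exactly the expected number of rollouts: every applied spec change bumps the StatefulSet's .metadata.generation. A minimal sketch matching the trace (the hard-failure behaviour on mismatch is an assumption):

    compare_generation() {
        local generation=$1 resource_type=$2 resource_name=$3
        local current_generation
        current_generation=$(kubectl get "$resource_type" "$resource_name" \
            -o 'jsonpath={.metadata.generation}')
        if [[ "$current_generation" != "$generation" ]]; then
            echo "$resource_type/$resource_name: expected generation $generation, got $current_generation" >&2
            exit 1                              # assumption: the test fails hard here
        fi
    }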
0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1JK9IP79EH ++ cat /tmp/tmp.QO5e1dftXh ++ rm /tmp/tmp.1JK9IP79EH /tmp/tmp.QO5e1dftXh ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OZzqkQLUC7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pMUtifxmwt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OZzqkQLUC7 ++ cat /tmp/tmp.pMUtifxmwt ++ rm /tmp/tmp.OZzqkQLUC7 /tmp/tmp.pMUtifxmwt ++ return 0 + pod_name=cmctl-7c7f6bf77d-rj2r4 + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xz29wcHiGY +++ mktemp ++ local LAST_ERR=/tmp/tmp.DXXjbnlL8I ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xz29wcHiGY ++ cat /tmp/tmp.DXXjbnlL8I ++ rm /tmp/tmp.xz29wcHiGY /tmp/tmp.DXXjbnlL8I ++ return 0 + revision=1 + kubectl_bin exec 
cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.MNz3B29yCk ++ mktemp + local LAST_ERR=/tmp/tmp.zYAtgUu5xa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MNz3B29yCk Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-22729/some-name-ssl-internal + cat /tmp/tmp.zYAtgUu5xa + rm /tmp/tmp.MNz3B29yCk /tmp/tmp.zYAtgUu5xa + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EPU7Luw0tJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.6RuvaL7LAm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EPU7Luw0tJ ++ cat /tmp/tmp.6RuvaL7LAm ++ rm /tmp/tmp.EPU7Luw0tJ /tmp/tmp.6RuvaL7LAm ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.upGL8SB7xM +++ mktemp ++ local LAST_ERR=/tmp/tmp.biXyjJiQwU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.upGL8SB7xM ++ cat /tmp/tmp.biXyjJiQwU ++ rm /tmp/tmp.upGL8SB7xM /tmp/tmp.biXyjJiQwU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D98umYDKg6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.AxY1ZOTrHD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D98umYDKg6 ++ cat /tmp/tmp.AxY1ZOTrHD ++ rm /tmp/tmp.D98umYDKg6 /tmp/tmp.AxY1ZOTrHD ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XVYBWJpvOr +++ mktemp ++ local LAST_ERR=/tmp/tmp.BESgVD2PSD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break 
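The certificate renewal above is driven by cmctl copied into a helper pod: after first confirming the Certificate is Ready ten times in a row via kubectl wait, the suite reads the current .status.revision, triggers reissuance, then polls until cert-manager increments the revision (1 -> 2 in this run). A sketch reconstructed from the trace:

    renew_certificate() {
        local certificate=$1
        local pod_name revision new_revision
        pod_name=$(kubectl get pods --selector=name=cmctl \
            -o 'jsonpath={.items[].metadata.name}')
        revision=$(kubectl get certificate "$certificate" \
            -o 'jsonpath={.status.revision}')
        kubectl exec "$pod_name" -- /tmp/cmctl renew "$certificate"
        for i in {1..10}; do                    # give cert-manager time to reissue
            new_revision=$(kubectl get certificate "$certificate" \
                -o 'jsonpath={.status.revision}')
            if [ "$new_revision" == "$((revision + 1))" ]; then
                break                           # new revision observed
            fi
            sleep 1
        done
    }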
++ cat /tmp/tmp.XVYBWJpvOr ++ cat /tmp/tmp.BESgVD2PSD ++ rm /tmp/tmp.XVYBWJpvOr /tmp/tmp.BESgVD2PSD ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................................................................................................................................................................................... + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L9iwvAOkO9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SoIwiefLeM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L9iwvAOkO9 ++ cat /tmp/tmp.SoIwiefLeM ++ rm /tmp/tmp.L9iwvAOkO9 /tmp/tmp.SoIwiefLeM ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m7BqZxUtWR +++ mktemp ++ local LAST_ERR=/tmp/tmp.5Relng5pU5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m7BqZxUtWR ++ cat /tmp/tmp.5Relng5pU5 ++ rm /tmp/tmp.m7BqZxUtWR /tmp/tmp.5Relng5pU5 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4v7PxRpuAj +++ mktemp ++ local LAST_ERR=/tmp/tmp.lOYgORKu7P ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4v7PxRpuAj ++ cat /tmp/tmp.lOYgORKu7P ++ rm /tmp/tmp.4v7PxRpuAj /tmp/tmp.lOYgORKu7P ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ 
kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mJ0AyN8BsP +++ mktemp ++ local LAST_ERR=/tmp/tmp.LxtOgNLGCS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mJ0AyN8BsP ++ cat /tmp/tmp.LxtOgNLGCS ++ rm /tmp/tmp.mJ0AyN8BsP /tmp/tmp.LxtOgNLGCS ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dF2S27fqeS +++ mktemp ++ local LAST_ERR=/tmp/tmp.wRmXc7mQjq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dF2S27fqeS ++ cat /tmp/tmp.wRmXc7mQjq ++ rm /tmp/tmp.dF2S27fqeS /tmp/tmp.wRmXc7mQjq ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hKNVVQNqPm +++ mktemp ++ local LAST_ERR=/tmp/tmp.j5nwJOMWnj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hKNVVQNqPm ++ cat /tmp/tmp.j5nwJOMWnj ++ rm /tmp/tmp.hKNVVQNqPm /tmp/tmp.j5nwJOMWnj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zShyHQwB9v +++ mktemp ++ local LAST_ERR=/tmp/tmp.n0fB1qUEBZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zShyHQwB9v ++ cat /tmp/tmp.n0fB1qUEBZ ++ rm /tmp/tmp.zShyHQwB9v /tmp/tmp.n0fB1qUEBZ ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 3 statefulset some-name-rs0 + local generation=3 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rDHQt9LTS5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.o3AoYXn4vg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rDHQt9LTS5 ++ cat /tmp/tmp.o3AoYXn4vg ++ rm /tmp/tmp.rDHQt9LTS5 /tmp/tmp.o3AoYXn4vg ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + compare_generation 3 statefulset some-name-cfg + local generation=3 + local resource_type=statefulset + local resource_name=some-name-cfg + local 
current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kobngOhz2Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.YZwuaQHbwN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kobngOhz2Y ++ cat /tmp/tmp.YZwuaQHbwN ++ rm /tmp/tmp.kobngOhz2Y /tmp/tmp.YZwuaQHbwN ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/some-name-rs0 -1201 + local resource=service/some-name-rs0 + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201.yml + local new_result=/tmp/tmp.eGWk64sFEo/service_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201-oc.yml ']' + kubectl_bin get -o yaml service/some-name-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-22729", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.H0RZ6kYTXc ++ mktemp + local LAST_ERR=/tmp/tmp.ImtyHTTWOf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H0RZ6kYTXc + cat /tmp/tmp.ImtyHTTWOf + rm /tmp/tmp.H0RZ6kYTXc /tmp/tmp.ImtyHTTWOf + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.eGWk64sFEo/service_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.eGWk64sFEo/service_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.eGWk64sFEo/service_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201.yml /tmp/tmp.eGWk64sFEo/service_some-name-rs0.yml + log 'compare_kubectl: service/some-name-rs0 OK' + set +o xtrace [2025-11-17T15:16:38+0000] compare_kubectl: service/some-name-rs0 OK + compare_kubectl service/some-name-cfg -1201 + local resource=service/some-name-cfg + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201.yml + local new_result=/tmp/tmp.eGWk64sFEo/service_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201-oc.yml ']' + kubectl_bin get -o yaml service/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-22729", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.D0WgEvN3GF ++ mktemp + local LAST_ERR=/tmp/tmp.tkFjx3Ox3U + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.D0WgEvN3GF + cat /tmp/tmp.tkFjx3Ox3U + rm /tmp/tmp.D0WgEvN3GF /tmp/tmp.tkFjx3Ox3U + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.eGWk64sFEo/service_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.eGWk64sFEo/service_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.eGWk64sFEo/service_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201.yml /tmp/tmp.eGWk64sFEo/service_some-name-cfg.yml + log 'compare_kubectl: service/some-name-cfg OK' + set +o xtrace [2025-11-17T15:16:38+0000] compare_kubectl: service/some-name-cfg OK + compare_kubectl statefulset/some-name-rs0 -1201 + local resource=statefulset/some-name-rs0 + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201.yml + local new_result=/tmp/tmp.eGWk64sFEo/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. 
| select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-22729", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.LGjmPgdmLx ++ mktemp + local LAST_ERR=/tmp/tmp.7ZIAhkikee + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LGjmPgdmLx + cat /tmp/tmp.7ZIAhkikee + rm /tmp/tmp.LGjmPgdmLx /tmp/tmp.7ZIAhkikee + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.eGWk64sFEo/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.eGWk64sFEo/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.eGWk64sFEo/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201.yml /tmp/tmp.eGWk64sFEo/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-11-17T15:16:39+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg -1201 + local resource=statefulset/some-name-cfg + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201.yml + local new_result=/tmp/tmp.eGWk64sFEo/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-22729", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.1PeWopFMx7 ++ mktemp + local LAST_ERR=/tmp/tmp.l847uB9vHe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1PeWopFMx7 + cat /tmp/tmp.l847uB9vHe + rm /tmp/tmp.1PeWopFMx7 /tmp/tmp.l847uB9vHe + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.eGWk64sFEo/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.eGWk64sFEo/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.eGWk64sFEo/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2044/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201.yml /tmp/tmp.eGWk64sFEo/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-11-17T15:16:40+0000] compare_kubectl: statefulset/some-name-cfg OK + desc 'test 1.21.1' + set +o xtrace ----------------------------------------------------------------------------------- test 1.21.1 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"crVersion":"1.21.1"} }' ++ mktemp + local LAST_OUT=/tmp/tmp.dA1jixpm1W ++ mktemp + local LAST_ERR=/tmp/tmp.dj6NIPWRop + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"crVersion":"1.21.1"} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dA1jixpm1W perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.dj6NIPWRop + rm /tmp/tmp.dA1jixpm1W /tmp/tmp.dj6NIPWRop + return 0 + sleep 20 + desc 'check if Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod started ----------------------------------------------------------------------------------- + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y11kf7y8wJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.x7F3cINF3s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y11kf7y8wJ ++ cat /tmp/tmp.x7F3cINF3s ++ rm /tmp/tmp.Y11kf7y8wJ /tmp/tmp.x7F3cINF3s ++ 
return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j9vzR2HDdo +++ mktemp ++ local LAST_ERR=/tmp/tmp.48l6PRVyBW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j9vzR2HDdo ++ cat /tmp/tmp.48l6PRVyBW ++ rm /tmp/tmp.j9vzR2HDdo /tmp/tmp.48l6PRVyBW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YCjmSqxUw2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PlrwcYLpzG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YCjmSqxUw2 ++ cat /tmp/tmp.PlrwcYLpzG ++ rm /tmp/tmp.YCjmSqxUw2 /tmp/tmp.PlrwcYLpzG ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................................................................................................... + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oXJp16tdac +++ mktemp ++ local LAST_ERR=/tmp/tmp.Aywgd2Wber ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oXJp16tdac ++ cat /tmp/tmp.Aywgd2Wber ++ rm /tmp/tmp.oXJp16tdac /tmp/tmp.Aywgd2Wber ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gK1T1Gerjj +++ mktemp ++ local LAST_ERR=/tmp/tmp.rvudYEIBJu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gK1T1Gerjj ++ cat /tmp/tmp.rvudYEIBJu ++ rm /tmp/tmp.gK1T1Gerjj /tmp/tmp.rvudYEIBJu ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lcNBIdsXyW +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.TGsJ2We55D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lcNBIdsXyW ++ cat /tmp/tmp.TGsJ2We55D ++ rm /tmp/tmp.lcNBIdsXyW /tmp/tmp.TGsJ2We55D ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P4kxdd5w8M +++ mktemp ++ local LAST_ERR=/tmp/tmp.XkjRwQXQcN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P4kxdd5w8M ++ cat /tmp/tmp.XkjRwQXQcN ++ rm /tmp/tmp.P4kxdd5w8M /tmp/tmp.XkjRwQXQcN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uarup4soFu +++ mktemp ++ local LAST_ERR=/tmp/tmp.ohPcmpRQuN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Uarup4soFu ++ cat /tmp/tmp.ohPcmpRQuN ++ rm /tmp/tmp.Uarup4soFu /tmp/tmp.ohPcmpRQuN ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YhpH8kZ9UX +++ mktemp ++ local LAST_ERR=/tmp/tmp.bue6QScqFh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YhpH8kZ9UX ++ cat /tmp/tmp.bue6QScqFh ++ rm /tmp/tmp.YhpH8kZ9UX /tmp/tmp.bue6QScqFh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zPC9JNNwIo +++ mktemp ++ local LAST_ERR=/tmp/tmp.G4GWycHSYL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zPC9JNNwIo ++ cat /tmp/tmp.G4GWycHSYL ++ rm /tmp/tmp.zPC9JNNwIo /tmp/tmp.G4GWycHSYL ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 4 statefulset some-name-rs0 + local generation=4 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pXTE2PCNIL +++ mktemp ++ local LAST_ERR=/tmp/tmp.kKyHF6njGF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pXTE2PCNIL ++ cat /tmp/tmp.kKyHF6njGF ++ rm /tmp/tmp.pXTE2PCNIL /tmp/tmp.kKyHF6njGF ++ return 0 + current_generation=4 + [[ 4 != \4 ]] + compare_generation 4 statefulset some-name-cfg + local generation=4 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dGQyNlXgFC +++ mktemp ++ local LAST_ERR=/tmp/tmp.qfEdt0cnDk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dGQyNlXgFC ++ cat /tmp/tmp.qfEdt0cnDk ++ rm /tmp/tmp.dGQyNlXgFC /tmp/tmp.qfEdt0cnDk ++ return 0 + current_generation=4 + [[ 4 != \4 ]] + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace 
----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KE36Ev55Cf +++ mktemp ++ local LAST_ERR=/tmp/tmp.9CMAf8okvE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KE36Ev55Cf ++ cat /tmp/tmp.9CMAf8okvE ++ rm /tmp/tmp.KE36Ev55Cf /tmp/tmp.9CMAf8okvE ++ return 0 + pod_name=cmctl-7c7f6bf77d-rj2r4 + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mfIkgaSHl3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ND4Ubd8zbp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mfIkgaSHl3 ++ cat /tmp/tmp.ND4Ubd8zbp ++ rm /tmp/tmp.mfIkgaSHl3 /tmp/tmp.ND4Ubd8zbp ++ return 0 + revision=2 + kubectl_bin exec cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.Ve9g4YOiJf ++ mktemp + local LAST_ERR=/tmp/tmp.q16kWILNic + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ve9g4YOiJf Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-22729/some-name-ssl + cat /tmp/tmp.q16kWILNic + rm /tmp/tmp.Ve9g4YOiJf /tmp/tmp.q16kWILNic + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FunJm43IhK +++ mktemp ++ local LAST_ERR=/tmp/tmp.vkqeff7IZL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FunJm43IhK ++ cat /tmp/tmp.vkqeff7IZL ++ rm /tmp/tmp.FunJm43IhK /tmp/tmp.vkqeff7IZL ++ return 0 + new_revision=2 + '[' 3 == 2 ']' + sleep 1 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2cjQ7qP0qW +++ mktemp ++ local LAST_ERR=/tmp/tmp.hBcvdGvwr3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2cjQ7qP0qW ++ cat /tmp/tmp.hBcvdGvwr3 ++ rm /tmp/tmp.2cjQ7qP0qW /tmp/tmp.hBcvdGvwr3 ++ return 0 + new_revision=3 + '[' 3 == 3 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + 
+ wait_cluster
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.PknaYSiZWm
+++ mktemp
++ local LAST_ERR=/tmp/tmp.fsc7q9C3hi
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.PknaYSiZWm
++ cat /tmp/tmp.fsc7q9C3hi
++ rm /tmp/tmp.PknaYSiZWm /tmp/tmp.fsc7q9C3hi
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.NReYPEvVqT
+++ mktemp
++ local LAST_ERR=/tmp/tmp.g7KHYRpB75
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.NReYPEvVqT
++ cat /tmp/tmp.g7KHYRpB75
++ rm /tmp/tmp.NReYPEvVqT /tmp/tmp.g7KHYRpB75
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Gv5fYHEPod
+++ mktemp
++ local LAST_ERR=/tmp/tmp.uIpVW7Ia9y
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Gv5fYHEPod
++ cat /tmp/tmp.uIpVW7Ia9y
++ rm /tmp/tmp.Gv5fYHEPod /tmp/tmp.uIpVW7Ia9y
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness...........................................................................................................................................................................................
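Each topology probe above is a filtered JSONPath query against the PerconaServerMongoDB custom resource; an empty result is treated as "feature disabled", so the [[ '' == true ]] guards simply fall through. The same query, packaged as a standalone helper (the function name is hypothetical; the jsonpath expression is verbatim from the trace):

    rs_option_enabled() {
        local cluster="$1" rs_name="$2" option="$3"   # option: arbiter | nonvoting | hidden
        local enabled
        enabled=$(kubectl get psmdb "$cluster" \
            -o "jsonpath={.spec.replsets[?(@.name==\"${rs_name}\")].${option}.enabled}")
        [[ "$enabled" == "true" ]]
    }

Usage would look like: rs_option_enabled some-name rs0 arbiter && echo "wait for the arbiter pod too".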
+ wait_for_running some-name-cfg 3
+ local name=some-name-cfg
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=cfg
+ local cluster_name=some-name
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-cfg-0
+ local pod=some-name-cfg-0
+ set +o xtrace
waiting for pod/some-name-cfg-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-cfg-1
+ local pod=some-name-cfg-1
+ set +o xtrace
waiting for pod/some-name-cfg-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.tFem482mfe
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Cng6AXOdCb
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.tFem482mfe
++ cat /tmp/tmp.Cng6AXOdCb
++ rm /tmp/tmp.tFem482mfe /tmp/tmp.Cng6AXOdCb
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-cfg-2
+ local pod=some-name-cfg-2
+ set +o xtrace
waiting for pod/some-name-cfg-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.7hGXIcbDDN
+++ mktemp
++ local LAST_ERR=/tmp/tmp.3443x2ovO7
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.7hGXIcbDDN
++ cat /tmp/tmp.3443x2ovO7
++ rm /tmp/tmp.7hGXIcbDDN /tmp/tmp.3443x2ovO7
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.45iRTfBVj6
+++ mktemp
++ local LAST_ERR=/tmp/tmp.tNTPtb6NGa
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.45iRTfBVj6
++ cat /tmp/tmp.tNTPtb6NGa
++ rm /tmp/tmp.45iRTfBVj6 /tmp/tmp.tNTPtb6NGa
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
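wait_for_running, seen here for the cfg replset and again for mongos just below, walks the StatefulSet ordinals 0..N-1 and blocks on each pod in turn. A compressed sketch under the assumption that only the plain-pod path matters; per the arbiter checks in the trace, the real helper also substitutes an arbiter pod for the last ordinal when arbiter.enabled is true:

    wait_for_running() {
        local name="$1" size="$2"        # e.g. wait_for_running some-name-cfg 3
        local last_pod=$((size - 1))

        for i in $(seq 0 $last_pod); do
            # wait_pod polls pod readiness, printing one dot per attempt.
            wait_pod "${name}-${i}"
        done
    }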
+ wait_for_running some-name-mongos 3
+ local name=some-name-mongos
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=mongos
+ local cluster_name=some-name
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-mongos-0
+ local pod=some-name-mongos-0
+ set +o xtrace
waiting for pod/some-name-mongos-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-mongos-1
+ local pod=some-name-mongos-1
+ set +o xtrace
waiting for pod/some-name-mongos-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.e7pr5kDosW
+++ mktemp
++ local LAST_ERR=/tmp/tmp.s4whJJJdhl
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.e7pr5kDosW
++ cat /tmp/tmp.s4whJJJdhl
++ rm /tmp/tmp.e7pr5kDosW /tmp/tmp.s4whJJJdhl
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-mongos-2
+ local pod=some-name-mongos-2
+ set +o xtrace
waiting for pod/some-name-mongos-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.OTe2EljmbJ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.SEa8fI8jny
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.OTe2EljmbJ
++ cat /tmp/tmp.SEa8fI8jny
++ rm /tmp/tmp.OTe2EljmbJ /tmp/tmp.SEa8fI8jny
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ygcxYcz5Jl
+++ mktemp
++ local LAST_ERR=/tmp/tmp.QdU6dnoVrv
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ygcxYcz5Jl
++ cat /tmp/tmp.QdU6dnoVrv
++ rm /tmp/tmp.ygcxYcz5Jl /tmp/tmp.QdU6dnoVrv
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness
+ wait_cluster_consistency some-name
+ local cluster_name=some-name
+ local wait_time=32
+ retry=0
+ sleep 7
+ echo -n 'waiting for cluster readyness'
waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.TqPgLeroSW
+++ mktemp
++ local LAST_ERR=/tmp/tmp.45x79GnNqc
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.status.state}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.TqPgLeroSW
++ cat /tmp/tmp.45x79GnNqc
++ rm /tmp/tmp.TqPgLeroSW /tmp/tmp.45x79GnNqc
++ return 0
+ [[ ready == \r\e\a\d\y ]]
+ echo .OK
.OK
+ compare_generation 5 statefulset some-name-rs0
+ local generation=5
+ local resource_type=statefulset
+ local resource_name=some-name-rs0
+ local current_generation
++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.R8vJsHIHBW
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PoK9WFaFnw
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.R8vJsHIHBW
++ cat /tmp/tmp.PoK9WFaFnw
++ rm /tmp/tmp.R8vJsHIHBW /tmp/tmp.PoK9WFaFnw
++ return 0
+ current_generation=5
+ [[ 5 != \5 ]]
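compare_generation, run after every renewal round, asserts that .metadata.generation on the StatefulSet matches the expected rollout count: generation 5 here means exactly one more spec change since the generation-4 check earlier. A sketch consistent with the trace (the failure branch and its message are illustrative, not the script's exact wording):

    compare_generation() {
        local generation="$1" resource_type="$2" resource_name="$3"
        local current_generation

        current_generation=$(kubectl get "$resource_type" "$resource_name" \
            -o 'jsonpath={.metadata.generation}')

        # A mismatch means the operator rolled the StatefulSet more (or
        # fewer) times than the certificate renewal should have caused.
        if [[ "$generation" != "$current_generation" ]]; then
            echo "expected generation $generation for $resource_type/$resource_name, got $current_generation"
            exit 1
        fi
    }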
+ compare_generation 5 statefulset some-name-cfg
+ local generation=5
+ local resource_type=statefulset
+ local resource_name=some-name-cfg
+ local current_generation
++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.8GH1nypaB6
+++ mktemp
++ local LAST_ERR=/tmp/tmp.YNcWRVK47y
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.8GH1nypaB6
++ cat /tmp/tmp.YNcWRVK47y
++ rm /tmp/tmp.8GH1nypaB6 /tmp/tmp.YNcWRVK47y
++ return 0
+ current_generation=5
+ [[ 5 != \5 ]]
+ renew_certificate some-name-ssl-internal
+ certificate=some-name-ssl-internal
+ wait_certificate some-name-ssl-internal
+ certificate=some-name-ssl-internal
+ for i in {1..10}
+ kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s
certificate.cert-manager.io/some-name-ssl-internal condition met
+ sleep 1
[... 9 identical wait iterations (kubectl wait / condition met / sleep 1) omitted ...]
+ desc 'renew some-name-ssl-internal'
+ set +o xtrace
-----------------------------------------------------------------------------------
renew some-name-ssl-internal
-----------------------------------------------------------------------------------
+ local pod_name
++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.GcGkz6xFhc
+++ mktemp
++ local LAST_ERR=/tmp/tmp.3ia677faNs
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.GcGkz6xFhc
++ cat /tmp/tmp.3ia677faNs
++ rm /tmp/tmp.GcGkz6xFhc /tmp/tmp.3ia677faNs
++ return 0
+ pod_name=cmctl-7c7f6bf77d-rj2r4
+ local revision
++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.PajT91DCz4
+++ mktemp
++ local LAST_ERR=/tmp/tmp.qFEiaG72Eh
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.PajT91DCz4
++ cat /tmp/tmp.qFEiaG72Eh
++ rm /tmp/tmp.PajT91DCz4 /tmp/tmp.qFEiaG72Eh
++ return 0
+ revision=2
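Nearly every command in this log goes through the kubectl_bin wrapper, whose internals (mktemp, LAST_OUT/LAST_ERR, seq 0 2, set +e / set -e, the '[' exit_status '!=' 0 ']' test) are what fill most of the trace. A sketch consistent with those internals; the retry back-off is an assumption, since the trace only ever shows the first attempt succeeding:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)

        # Up to three attempts; stdout/stderr are captured to temp files
        # so they can be replayed after the loop.
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break
            fi
            sleep "$timeout"   # assumed back-off; not visible in the trace
        done

        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }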
+ kubectl_bin exec cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl-internal
++ mktemp
+ local LAST_OUT=/tmp/tmp.nAsRCYulXf
++ mktemp
+ local LAST_ERR=/tmp/tmp.AQeq1R4sS1
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec cmctl-7c7f6bf77d-rj2r4 -- /tmp/cmctl renew some-name-ssl-internal
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.nAsRCYulXf
Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-22729/some-name-ssl-internal
+ cat /tmp/tmp.AQeq1R4sS1
+ rm /tmp/tmp.nAsRCYulXf /tmp/tmp.AQeq1R4sS1
+ return 0
+ for i in {1..10}
+ local new_revision
++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.E8syqaBaGK
+++ mktemp
++ local LAST_ERR=/tmp/tmp.EKUc37TquS
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.E8syqaBaGK
++ cat /tmp/tmp.EKUc37TquS
++ rm /tmp/tmp.E8syqaBaGK /tmp/tmp.EKUc37TquS
++ return 0
+ new_revision=2
+ '[' 3 == 2 ']'
+ sleep 1
+ for i in {1..10}
+ local new_revision
++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.KmOUQmctUp
+++ mktemp
++ local LAST_ERR=/tmp/tmp.tQy5uI6JcT
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.KmOUQmctUp
++ cat /tmp/tmp.tQy5uI6JcT
++ rm /tmp/tmp.KmOUQmctUp /tmp/tmp.tQy5uI6JcT
++ return 0
+ new_revision=3
+ '[' 3 == 3 ']'
+ break
+ sleep 20
+ wait_cluster
+ wait_for_running some-name-rs0 3
+ local name=some-name-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=some-name
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod some-name-rs0-0
+ local pod=some-name-rs0-0
+ set +o xtrace
waiting for pod/some-name-rs0-0 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod some-name-rs0-1
+ local pod=some-name-rs0-1
+ set +o xtrace
waiting for pod/some-name-rs0-1 to be ready.OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Jt8dl4K8ug
+++ mktemp
++ local LAST_ERR=/tmp/tmp.91xsiQRNJA
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Jt8dl4K8ug
++ cat /tmp/tmp.91xsiQRNJA
++ rm /tmp/tmp.Jt8dl4K8ug /tmp/tmp.91xsiQRNJA
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod some-name-rs0-2
+ local pod=some-name-rs0-2
+ set +o xtrace
waiting for pod/some-name-rs0-2 to be ready.OK
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.VZYuBVtPGe
+++ mktemp
++ local LAST_ERR=/tmp/tmp.9ZHmM1Ftv3
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.VZYuBVtPGe
++ cat /tmp/tmp.9ZHmM1Ftv3
++ rm /tmp/tmp.VZYuBVtPGe /tmp/tmp.9ZHmM1Ftv3
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.VwRjfdOSC0
+++ mktemp
++ local LAST_ERR=/tmp/tmp.jZTuR8VFTJ
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.VwRjfdOSC0
++ cat /tmp/tmp.jZTuR8VFTJ
++ rm /tmp/tmp.VwRjfdOSC0 /tmp/tmp.jZTuR8VFTJ
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness.................................................................................................................................................................................Terminated