Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/logs/upgrade-consistency-sharded-tls.log grep: warning: stray \ before - Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + CLUSTER=some-name + main + create_infra upgrade-consistency-sharded-tls-18466 + local ns=upgrade-consistency-sharded-tls-18466 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.wdvhb0wlCO ++ mktemp + local LAST_ERR=/tmp/tmp.0znwX8uCEw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wdvhb0wlCO customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.0znwX8uCEw + rm /tmp/tmp.wdvhb0wlCO /tmp/tmp.0znwX8uCEw + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.mQwy9nJdNq ++ mktemp + local LAST_ERR=/tmp/tmp.05N60Az9Zj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mQwy9nJdNq + cat /tmp/tmp.05N60Az9Zj + rm /tmp/tmp.mQwy9nJdNq /tmp/tmp.05N60Az9Zj + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch 
perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ZPXeNajlvQ ++ mktemp + local LAST_ERR=/tmp/tmp.GXOXDJRCWK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZPXeNajlvQ + cat /tmp/tmp.GXOXDJRCWK + rm /tmp/tmp.ZPXeNajlvQ /tmp/tmp.GXOXDJRCWK + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n upgrade-consistency-sharded-tls-10336 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dEdOrY3y68 ++ mktemp + local LAST_ERR=/tmp/tmp.lwRAMbvuxC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dEdOrY3y68 customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.lwRAMbvuxC + rm /tmp/tmp.dEdOrY3y68 /tmp/tmp.lwRAMbvuxC + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.OZXq1JJAJf ++ mktemp + local LAST_ERR=/tmp/tmp.HZ2MygNOFc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OZXq1JJAJf clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.HZ2MygNOFc + rm /tmp/tmp.OZXq1JJAJf /tmp/tmp.HZ2MygNOFc + return 0 + check_crd_for_deletion PR-2152-4461031b + local git_tag=PR-2152-4461031b ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2152-4461031b/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9ej11yKHLY +++ mktemp ++ local LAST_ERR=/tmp/tmp.XU31Py0mNC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.9ej11yKHLY 
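
For reference, the finalizer-stripping pass traced above reduces to the following per-CRD loop (a minimal sketch assembled from the commands in this log; the trailing '|| true' plays the role of the bare ':' seen after each failed patch, so a missing resource type does not abort the test):

kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
  | grep -v NAMESPACE \
  | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
  || true
# once no finalizer blocks deletion, wait for the CRD itself to disappear
kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
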
++ cat /tmp/tmp.XU31Py0mNC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.9ej11yKHLY ++ cat /tmp/tmp.XU31Py0mNC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.9ej11yKHLY ++ cat /tmp/tmp.XU31Py0mNC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.9ej11yKHLY ++ cat /tmp/tmp.XU31Py0mNC Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.9ej11yKHLY /tmp/tmp.XU31Py0mNC ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp egrep: warning: 
egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.gGytgDQIGs ++ mktemp + local LAST_OUT=/tmp/tmp.SGurpiLpUj ++ mktemp + local LAST_ERR=/tmp/tmp.D55IyPqjSz + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.hQePQuxqRx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gGytgDQIGs + cat /tmp/tmp.D55IyPqjSz + rm /tmp/tmp.gGytgDQIGs /tmp/tmp.D55IyPqjSz + return 0 namespace "cert-manager" deleted namespace "upgrade-consistency-sharded-tls-10336" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SGurpiLpUj namespace "psmdb-operator" deleted + cat /tmp/tmp.hQePQuxqRx + rm /tmp/tmp.SGurpiLpUj /tmp/tmp.hQePQuxqRx + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jOC4jqHRqr ++ mktemp + local LAST_ERR=/tmp/tmp.pfBmIdHiDP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jOC4jqHRqr + cat /tmp/tmp.pfBmIdHiDP + rm /tmp/tmp.jOC4jqHRqr /tmp/tmp.pfBmIdHiDP + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.5I6ze63HIa ++ mktemp + local LAST_ERR=/tmp/tmp.5xnmle5w6w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5I6ze63HIa namespace/psmdb-operator created + cat /tmp/tmp.5xnmle5w6w + rm /tmp/tmp.5I6ze63HIa /tmp/tmp.5xnmle5w6w + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.OgZUWARFkY +++ mktemp ++ local LAST_ERR=/tmp/tmp.fPIVAIUtyG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OgZUWARFkY ++ cat /tmp/tmp.fPIVAIUtyG ++ rm /tmp/tmp.OgZUWARFkY /tmp/tmp.fPIVAIUtyG ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster8 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.DB2UTh6XTy ++ mktemp + local LAST_ERR=/tmp/tmp.cKR2zAQyYo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster8 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DB2UTh6XTy Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster8" modified. 
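
Nearly every command in this log runs through a kubectl_bin wrapper, which is why each call is surrounded by mktemp, seq 0 2, cat and rm lines. A rough sketch of that helper, reconstructed from the trace (the real function is presumably defined in the repository's e2e-tests helpers; the 0s/4s/8s back-off matches the sleeps visible above):

kubectl_bin() {
    local out err exit_status=0 timeout=4
    out=$(mktemp); err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$out" 2>"$err"     # capture both streams per attempt
        exit_status=$?
        set -e
        [ "$exit_status" -eq 0 ] && break
        sleep $((timeout * i))            # 0, 4, 8 seconds between retries
    done
    cat "$out"; cat "$err" >&2            # replay captured output
    rm "$out" "$err"
    return $exit_status
}
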
+ cat /tmp/tmp.cKR2zAQyYo + rm /tmp/tmp.DB2UTh6XTy /tmp/tmp.cKR2zAQyYo + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2152-4461031b' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2152-4461031b ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.QtO9AWVSMF ++ mktemp + local LAST_ERR=/tmp/tmp.phPspaCw0O + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QtO9AWVSMF customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.phPspaCw0O + rm /tmp/tmp.QtO9AWVSMF /tmp/tmp.phPspaCw0O + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.p0mmJbuXGj ++ mktemp + local LAST_ERR=/tmp/tmp.f50EA20GFf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p0mmJbuXGj clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.f50EA20GFf + rm /tmp/tmp.p0mmJbuXGj /tmp/tmp.f50EA20GFf + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2152-4461031b") | + kubectl_bin apply -n psmdb-operator -f - ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ESQxtmtRBp ++ mktemp + local LAST_ERR=/tmp/tmp.0oZEzaOoV1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ESQxtmtRBp deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.0oZEzaOoV1 + rm /tmp/tmp.ESQxtmtRBp /tmp/tmp.0oZEzaOoV1 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.s7yVQFi5yL +++ mktemp ++ local LAST_ERR=/tmp/tmp.vs1VLEMwnp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.s7yVQFi5yL ++ cat /tmp/tmp.vs1VLEMwnp ++ rm /tmp/tmp.s7yVQFi5yL /tmp/tmp.vs1VLEMwnp ++ return 0 + wait_operator_pod percona-server-mongodb-operator-78c57f9588-9h7rl + local pod=percona-server-mongodb-operator-78c57f9588-9h7rl + set +o xtrace waiting for pod/percona-server-mongodb-operator-78c57f9588-9h7rl to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.hfBbr5EKOg +++ mktemp ++ local LAST_ERR=/tmp/tmp.uhLibXYGCe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hfBbr5EKOg ++ cat /tmp/tmp.uhLibXYGCe ++ rm /tmp/tmp.hfBbr5EKOg /tmp/tmp.uhLibXYGCe ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-78c57f9588-9h7rl ++ mktemp + local LAST_OUT=/tmp/tmp.60anMhE8Hu ++ mktemp + local LAST_ERR=/tmp/tmp.KABNfWqHwr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-78c57f9588-9h7rl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.60anMhE8Hu + cat /tmp/tmp.KABNfWqHwr + rm /tmp/tmp.60anMhE8Hu /tmp/tmp.KABNfWqHwr + return 0 2025-12-18T20:14:16.710Z INFO setup Manager starting up {"gitCommit": "4461031b25c11e5b29d77d465c058b936127aac2", "gitBranch": "PR-2152-4461031b", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace upgrade-consistency-sharded-tls-18466 + local namespace=upgrade-consistency-sharded-tls-18466 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print 
$1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces upgrade-consistency-sharded-tls-18466' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces upgrade-consistency-sharded-tls-18466 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace upgrade-consistency-sharded-tls-18466 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.hdMjSvEvYo egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.IJt8onzCVc ++ mktemp + local LAST_ERR=/tmp/tmp.ni9n5brvMt + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.yWIIRWqQuG + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace upgrade-consistency-sharded-tls-18466 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hdMjSvEvYo + cat /tmp/tmp.ni9n5brvMt + rm /tmp/tmp.hdMjSvEvYo /tmp/tmp.ni9n5brvMt + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IJt8onzCVc + cat /tmp/tmp.yWIIRWqQuG + rm /tmp/tmp.IJt8onzCVc /tmp/tmp.yWIIRWqQuG + return 0 + kubectl_bin wait --for=delete namespace upgrade-consistency-sharded-tls-18466 ++ mktemp + local LAST_OUT=/tmp/tmp.uq8aDAHIWv ++ mktemp + local LAST_ERR=/tmp/tmp.BPbsWbaOJG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace upgrade-consistency-sharded-tls-18466 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uq8aDAHIWv + cat /tmp/tmp.BPbsWbaOJG + rm /tmp/tmp.uq8aDAHIWv /tmp/tmp.BPbsWbaOJG + return 0 + desc 'create namespace upgrade-consistency-sharded-tls-18466' + set +o xtrace ----------------------------------------------------------------------------------- create namespace 
upgrade-consistency-sharded-tls-18466 ----------------------------------------------------------------------------------- + kubectl_bin create namespace upgrade-consistency-sharded-tls-18466 ++ mktemp + local LAST_OUT=/tmp/tmp.sWLLYHl00d ++ mktemp + local LAST_ERR=/tmp/tmp.E2GeUJUkWE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace upgrade-consistency-sharded-tls-18466 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sWLLYHl00d namespace/upgrade-consistency-sharded-tls-18466 created + cat /tmp/tmp.E2GeUJUkWE + rm /tmp/tmp.sWLLYHl00d /tmp/tmp.E2GeUJUkWE + return 0 + set_kube_ctx upgrade-consistency-sharded-tls-18466 + local namespace=upgrade-consistency-sharded-tls-18466 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.OzUHYC0bHd +++ mktemp ++ local LAST_ERR=/tmp/tmp.h0auUOrsLd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OzUHYC0bHd ++ cat /tmp/tmp.h0auUOrsLd ++ rm /tmp/tmp.OzUHYC0bHd /tmp/tmp.h0auUOrsLd ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster8 --namespace=upgrade-consistency-sharded-tls-18466 ++ mktemp + local LAST_OUT=/tmp/tmp.4yXbM81fkV ++ mktemp + local LAST_ERR=/tmp/tmp.73bkdq25iK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster8 --namespace=upgrade-consistency-sharded-tls-18466 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4yXbM81fkV Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2152-4461031b-2-cluster8" modified. 
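
Condensed, the deploy_operator step earlier in this trace amounts to three applies, taken from the commands above (paths shown relative to the PR checkout):

kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
# cluster-wide RBAC, with the namespace rewritten to psmdb-operator
sed -e 's^namespace: .*^namespace: psmdb-operator^' deploy/cw-rbac.yaml \
  | kubectl apply -n psmdb-operator -f -
# operator deployment with the PR image and debug logging injected via yq
yq eval '
  (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2152-4461031b") |
  ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
  ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' deploy/cw-operator.yaml \
  | kubectl apply -n psmdb-operator -f -
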
+ cat /tmp/tmp.73bkdq25iK + rm /tmp/tmp.4yXbM81fkV /tmp/tmp.73bkdq25iK + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.pUbxMbOqj9 ++ mktemp + local LAST_ERR=/tmp/tmp.cVpnxkLhn7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pUbxMbOqj9 namespace/cert-manager created + cat /tmp/tmp.cVpnxkLhn7 + rm /tmp/tmp.pUbxMbOqj9 /tmp/tmp.cVpnxkLhn7 + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.TUjeT1qpBr ++ mktemp + local LAST_ERR=/tmp/tmp.q6Z5qkDLO5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TUjeT1qpBr namespace/cert-manager labeled + cat /tmp/tmp.q6Z5qkDLO5 + rm /tmp/tmp.TUjeT1qpBr /tmp/tmp.q6Z5qkDLO5 + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.ANIRCjyOzX ++ mktemp + local LAST_ERR=/tmp/tmp.ZSm5Eo4DvT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ANIRCjyOzX namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.ZSm5Eo4DvT Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
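
The deploy_cert_manager step above boils down to four commands; the pod readiness wait that follows next in the trace completes it:

kubectl create namespace cert-manager
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml --validate=false
kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
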
+ rm /tmp/tmp.ANIRCjyOzX /tmp/tmp.ZSm5Eo4DvT + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.SBv95FefT5 ++ mktemp + local LAST_ERR=/tmp/tmp.BBIBKyyhBo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SBv95FefT5 pod/cert-manager-cainjector-5dc9c8b4f7-tws5w condition met pod/cert-manager-df4b69479-g9sqs condition met pod/cert-manager-webhook-769bbb594d-4tnc5 condition met + cat /tmp/tmp.BBIBKyyhBo + rm /tmp/tmp.SBv95FefT5 /tmp/tmp.BBIBKyyhBo + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.XYiE4wJ737 ++ mktemp + local LAST_ERR=/tmp/tmp.qFTT1WVj4B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XYiE4wJ737 secret/some-users created + cat /tmp/tmp.qFTT1WVj4B + rm /tmp/tmp.XYiE4wJ737 /tmp/tmp.qFTT1WVj4B + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/client_with_tls.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Cl2Z9ylrnZ ++ mktemp + local LAST_ERR=/tmp/tmp.tQozVVOf8I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/client_with_tls.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Cl2Z9ylrnZ deployment.apps/psmdb-client created + cat /tmp/tmp.tQozVVOf8I + rm /tmp/tmp.Cl2Z9ylrnZ /tmp/tmp.tQozVVOf8I + return 0 + deploy_cmctl + local service_account=cmctl + /usr/sbin/sed -e s/percona-server-mongodb-operator/cmctl/g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/rbac.yaml + yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.s5ZZve2OyV ++ mktemp + local LAST_ERR=/tmp/tmp.4AvjHil6xD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s5ZZve2OyV role.rbac.authorization.k8s.io/cmctl created serviceaccount/cmctl created rolebinding.rbac.authorization.k8s.io/service-account-cmctl created + cat /tmp/tmp.4AvjHil6xD + rm /tmp/tmp.s5ZZve2OyV /tmp/tmp.4AvjHil6xD + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/cmctl.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qdEyAMkh0O ++ mktemp + local LAST_ERR=/tmp/tmp.ekjNWwnUbd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/conf/cmctl.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qdEyAMkh0O deployment.apps/cmctl created + cat /tmp/tmp.ekjNWwnUbd + rm 
/tmp/tmp.qdEyAMkh0O /tmp/tmp.ekjNWwnUbd + return 0 + desc 'create first PSMDB cluster 1.20.1 some-name' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster 1.20.1 some-name ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/conf/some-name.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2152-4461031b"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.pl70Ue7ChI + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.KAAfPQDouP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pl70Ue7ChI perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.KAAfPQDouP + rm /tmp/tmp.pl70Ue7ChI /tmp/tmp.KAAfPQDouP + return 0 + desc 'check if Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod started ----------------------------------------------------------------------------------- + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vb4YC5CaM4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Mg1gyWK7Q3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Vb4YC5CaM4 ++ cat /tmp/tmp.Mg1gyWK7Q3 ++ rm /tmp/tmp.Vb4YC5CaM4 /tmp/tmp.Mg1gyWK7Q3 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SRiwXiScBA +++ mktemp ++ local LAST_ERR=/tmp/tmp.ttClj3m3nl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SRiwXiScBA ++ cat /tmp/tmp.ttClj3m3nl ++ rm /tmp/tmp.SRiwXiScBA /tmp/tmp.ttClj3m3nl ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dBINsk7ymY +++ mktemp ++ local LAST_ERR=/tmp/tmp.c3SbMTqW7W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dBINsk7ymY ++ cat /tmp/tmp.c3SbMTqW7W ++ rm /tmp/tmp.dBINsk7ymY /tmp/tmp.c3SbMTqW7W ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.............................. + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LVV7K7lvQ2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8VDim0aRDu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LVV7K7lvQ2 ++ cat /tmp/tmp.8VDim0aRDu ++ rm /tmp/tmp.LVV7K7lvQ2 /tmp/tmp.8VDim0aRDu ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wPTdZzGduQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.hEInD1Ncro ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wPTdZzGduQ ++ cat /tmp/tmp.hEInD1Ncro ++ rm /tmp/tmp.wPTdZzGduQ /tmp/tmp.hEInD1Ncro ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n3CqMvfYRB +++ mktemp ++ local LAST_ERR=/tmp/tmp.1e06VEB0A3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n3CqMvfYRB ++ cat /tmp/tmp.1e06VEB0A3 ++ rm /tmp/tmp.n3CqMvfYRB /tmp/tmp.1e06VEB0A3 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let 
last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mYXbfdeVcV +++ mktemp ++ local LAST_ERR=/tmp/tmp.fTn8Rpd9Eb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mYXbfdeVcV ++ cat /tmp/tmp.fTn8Rpd9Eb ++ rm /tmp/tmp.mYXbfdeVcV /tmp/tmp.fTn8Rpd9Eb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mzhpFZKBLe +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZCXkQDygKX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mzhpFZKBLe ++ cat /tmp/tmp.ZCXkQDygKX ++ rm /tmp/tmp.mzhpFZKBLe /tmp/tmp.ZCXkQDygKX ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k0MITSfeXW +++ mktemp ++ local LAST_ERR=/tmp/tmp.anCpTbM2Mi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k0MITSfeXW ++ cat /tmp/tmp.anCpTbM2Mi ++ rm /tmp/tmp.k0MITSfeXW /tmp/tmp.anCpTbM2Mi ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wuFL4hRpvZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.p4IfWSIqzW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wuFL4hRpvZ ++ cat /tmp/tmp.p4IfWSIqzW ++ rm /tmp/tmp.wuFL4hRpvZ /tmp/tmp.p4IfWSIqzW ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 1 statefulset some-name-rs0 + local generation=1 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nxYTrQp4ah +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.AWYcJgZKUg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nxYTrQp4ah ++ cat /tmp/tmp.AWYcJgZKUg ++ rm /tmp/tmp.nxYTrQp4ah /tmp/tmp.AWYcJgZKUg ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + compare_generation 1 statefulset some-name-cfg + local generation=1 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LUyxkjF4uT +++ mktemp ++ local LAST_ERR=/tmp/tmp.rckkmB7cfu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LUyxkjF4uT ++ cat /tmp/tmp.rckkmB7cfu ++ rm /tmp/tmp.LUyxkjF4uT /tmp/tmp.rckkmB7cfu ++ return 0 + current_generation=1 + [[ 1 != \1 ]] + sleep 20 + desc 'check if Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod started ----------------------------------------------------------------------------------- + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tMJy9Gi6wH +++ mktemp ++ local LAST_ERR=/tmp/tmp.TuYLEPRJhi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tMJy9Gi6wH ++ cat /tmp/tmp.TuYLEPRJhi ++ rm /tmp/tmp.tMJy9Gi6wH /tmp/tmp.TuYLEPRJhi ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.js3aDVSqmY +++ mktemp ++ local LAST_ERR=/tmp/tmp.1rHPSlPFiv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.js3aDVSqmY ++ cat /tmp/tmp.1rHPSlPFiv ++ rm /tmp/tmp.js3aDVSqmY /tmp/tmp.1rHPSlPFiv ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.79mf7XKOqw +++ mktemp ++ local LAST_ERR=/tmp/tmp.dbAPv00UjN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set 
+e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.79mf7XKOqw ++ cat /tmp/tmp.dbAPv00UjN ++ rm /tmp/tmp.79mf7XKOqw /tmp/tmp.dbAPv00UjN ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K2lagiWsti +++ mktemp ++ local LAST_ERR=/tmp/tmp.p57xP3WNqW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K2lagiWsti ++ cat /tmp/tmp.p57xP3WNqW ++ rm /tmp/tmp.K2lagiWsti /tmp/tmp.p57xP3WNqW ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8wiRVfqRTm +++ mktemp ++ local LAST_ERR=/tmp/tmp.PFwmdTvICc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8wiRVfqRTm ++ cat /tmp/tmp.PFwmdTvICc ++ rm /tmp/tmp.8wiRVfqRTm /tmp/tmp.PFwmdTvICc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UWwFg99NVA +++ mktemp ++ local LAST_ERR=/tmp/tmp.EVaiQtgNi2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UWwFg99NVA ++ cat /tmp/tmp.EVaiQtgNi2 ++ rm /tmp/tmp.UWwFg99NVA /tmp/tmp.EVaiQtgNi2 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb 
some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sVkQu3Ogt4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.d2RRguXIGz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sVkQu3Ogt4 ++ cat /tmp/tmp.d2RRguXIGz ++ rm /tmp/tmp.sVkQu3Ogt4 /tmp/tmp.d2RRguXIGz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d0uVRiRZYx +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZtqFMrSZ69 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d0uVRiRZYx ++ cat /tmp/tmp.ZtqFMrSZ69 ++ rm /tmp/tmp.d0uVRiRZYx /tmp/tmp.ZtqFMrSZ69 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SaMjUXTh97 +++ mktemp ++ local LAST_ERR=/tmp/tmp.O3kbRhnJyx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SaMjUXTh97 ++ cat /tmp/tmp.O3kbRhnJyx ++ rm /tmp/tmp.SaMjUXTh97 /tmp/tmp.O3kbRhnJyx ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yEGTxR4Z7P +++ mktemp ++ local LAST_ERR=/tmp/tmp.uyCoZBcQct ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yEGTxR4Z7P ++ cat /tmp/tmp.uyCoZBcQct ++ rm /tmp/tmp.yEGTxR4Z7P /tmp/tmp.uyCoZBcQct ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s 
certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZhNZ6a2Dew +++ mktemp ++ local LAST_ERR=/tmp/tmp.AuThgKQbD8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZhNZ6a2Dew ++ cat /tmp/tmp.AuThgKQbD8 ++ rm /tmp/tmp.ZhNZ6a2Dew /tmp/tmp.AuThgKQbD8 ++ return 0 + pod_name=cmctl-7c7f6bf77d-4qbg8 + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cepuXJbrTT +++ mktemp ++ local LAST_ERR=/tmp/tmp.6BXo2PSN99 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cepuXJbrTT ++ cat /tmp/tmp.6BXo2PSN99 ++ rm /tmp/tmp.cepuXJbrTT /tmp/tmp.6BXo2PSN99 ++ return 0 + revision=1 + kubectl_bin exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.CQs73RQBE5 ++ mktemp + local LAST_ERR=/tmp/tmp.nYmAXYK40K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CQs73RQBE5 Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-18466/some-name-ssl + cat /tmp/tmp.nYmAXYK40K + rm /tmp/tmp.CQs73RQBE5 /tmp/tmp.nYmAXYK40K + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CvO4ledm9K +++ mktemp ++ local LAST_ERR=/tmp/tmp.yGPGo6sLK5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CvO4ledm9K ++ cat /tmp/tmp.yGPGo6sLK5 ++ rm /tmp/tmp.CvO4ledm9K /tmp/tmp.yGPGo6sLK5 ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true 
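
The renew_certificate flow just traced can be summarized as: record the certificate's current revision, trigger a manual renewal through the cmctl pod, then poll until cert-manager reports the next revision. A sketch using the same commands as the trace (pod_name is looked up from the cmctl deployment exactly as above):

pod_name=$(kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}')
revision=$(kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}')
kubectl exec "$pod_name" -- /tmp/cmctl renew some-name-ssl
for i in {1..10}; do
    new_revision=$(kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}')
    [ "$new_revision" == "$((revision + 1))" ] && break   # revision went 1 -> 2 in this run
    sleep 1
done
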
+ set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ha5O6bpD5z +++ mktemp ++ local LAST_ERR=/tmp/tmp.qHwSBdUpBr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ha5O6bpD5z ++ cat /tmp/tmp.qHwSBdUpBr ++ rm /tmp/tmp.ha5O6bpD5z /tmp/tmp.qHwSBdUpBr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.05h4qbCFWg +++ mktemp ++ local LAST_ERR=/tmp/tmp.ROyIfiXpMr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.05h4qbCFWg ++ cat /tmp/tmp.ROyIfiXpMr ++ rm /tmp/tmp.05h4qbCFWg /tmp/tmp.ROyIfiXpMr ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M0fvq8mIag +++ mktemp ++ local LAST_ERR=/tmp/tmp.wcjxGfEToo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M0fvq8mIag ++ cat /tmp/tmp.wcjxGfEToo ++ rm /tmp/tmp.M0fvq8mIag /tmp/tmp.wcjxGfEToo ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................................................................................................................ 
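Every kubectl call in this test goes through the kubectl_bin helper, which is why each step in the trace is bracketed by mktemp bookkeeping for LAST_OUT/LAST_ERR, a three-attempt loop (seq 0 2), and a final cat/rm/return. A minimal sketch of what the wrapper appears to do, reconstructed from the trace; only the success path is visible above, so the back-off between failed attempts is an assumption:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                  # up to three attempts
            set +e
            kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            set -e
            if [ "${exit_status}" != 0 ]; then
                sleep "${timeout}"               # assumed back-off; not shown on the success path
                continue
            fi
            break
        done
        cat "${LAST_OUT}"                        # replay the captured output into the log
        cat "${LAST_ERR}"
        rm "${LAST_OUT}" "${LAST_ERR}"
        return "${exit_status}"
    }

For example, the status polls in this log correspond to kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}'.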
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8WJJaYSs9q +++ mktemp ++ local LAST_ERR=/tmp/tmp.BWkevthduX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8WJJaYSs9q ++ cat /tmp/tmp.BWkevthduX ++ rm /tmp/tmp.8WJJaYSs9q /tmp/tmp.BWkevthduX ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FhV6IGcLK4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qz6P3eGZdB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FhV6IGcLK4 ++ cat /tmp/tmp.qz6P3eGZdB ++ rm /tmp/tmp.FhV6IGcLK4 /tmp/tmp.qz6P3eGZdB ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r6kXJAndvM +++ mktemp ++ local LAST_ERR=/tmp/tmp.8GsuaJWjjB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r6kXJAndvM ++ cat /tmp/tmp.8GsuaJWjjB ++ rm /tmp/tmp.r6kXJAndvM /tmp/tmp.8GsuaJWjjB ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lKsdzYaDlP +++ mktemp ++ local LAST_ERR=/tmp/tmp.IcXDgP2ZOb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lKsdzYaDlP ++ cat /tmp/tmp.IcXDgP2ZOb ++ rm /tmp/tmp.lKsdzYaDlP /tmp/tmp.IcXDgP2ZOb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ftIRfN2aH2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.FWHOsgGdJ0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ftIRfN2aH2 ++ cat /tmp/tmp.FWHOsgGdJ0 ++ rm /tmp/tmp.ftIRfN2aH2 /tmp/tmp.FWHOsgGdJ0 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4Ti833FqC2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rhi9Xx07gy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4Ti833FqC2 ++ cat /tmp/tmp.Rhi9Xx07gy ++ rm /tmp/tmp.4Ti833FqC2 /tmp/tmp.Rhi9Xx07gy ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UhH3Nbly3n +++ mktemp ++ local LAST_ERR=/tmp/tmp.QFJdfQsFjH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UhH3Nbly3n ++ cat /tmp/tmp.QFJdfQsFjH ++ rm /tmp/tmp.UhH3Nbly3n /tmp/tmp.QFJdfQsFjH ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 2 statefulset some-name-rs0 + local generation=2 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UIGm21j9nK +++ mktemp ++ local LAST_ERR=/tmp/tmp.SjW6O0Rx6i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UIGm21j9nK ++ cat /tmp/tmp.SjW6O0Rx6i ++ rm /tmp/tmp.UIGm21j9nK /tmp/tmp.SjW6O0Rx6i ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset some-name-cfg + local generation=2 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B7rwfcuApo +++ mktemp ++ local LAST_ERR=/tmp/tmp.wWMqFdJDqv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 
0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B7rwfcuApo ++ cat /tmp/tmp.wWMqFdJDqv ++ rm /tmp/tmp.B7rwfcuApo /tmp/tmp.wWMqFdJDqv ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AvNioMU9qD +++ mktemp ++ local LAST_ERR=/tmp/tmp.p9xqC2lg6y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AvNioMU9qD ++ cat /tmp/tmp.p9xqC2lg6y ++ rm /tmp/tmp.AvNioMU9qD /tmp/tmp.p9xqC2lg6y ++ return 0 + pod_name=cmctl-7c7f6bf77d-4qbg8 + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cbkwaJgYXz +++ mktemp ++ local LAST_ERR=/tmp/tmp.MiYNczJefk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cbkwaJgYXz ++ cat /tmp/tmp.MiYNczJefk ++ rm /tmp/tmp.cbkwaJgYXz /tmp/tmp.MiYNczJefk ++ return 0 + revision=1 + kubectl_bin exec 
cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.FoHoxvOXAa ++ mktemp + local LAST_ERR=/tmp/tmp.2n7hE3P793 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FoHoxvOXAa Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-18466/some-name-ssl-internal + cat /tmp/tmp.2n7hE3P793 + rm /tmp/tmp.FoHoxvOXAa /tmp/tmp.2n7hE3P793 + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yBZOkXejpZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Wy78SzI4rO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yBZOkXejpZ ++ cat /tmp/tmp.Wy78SzI4rO ++ rm /tmp/tmp.yBZOkXejpZ /tmp/tmp.Wy78SzI4rO ++ return 0 + new_revision=2 + '[' 2 == 2 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3QhYXRZREP +++ mktemp ++ local LAST_ERR=/tmp/tmp.tjNRY6ccfX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3QhYXRZREP ++ cat /tmp/tmp.tjNRY6ccfX ++ rm /tmp/tmp.3QhYXRZREP /tmp/tmp.tjNRY6ccfX ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0l4lII78Kn +++ mktemp ++ local LAST_ERR=/tmp/tmp.QQTD7bw6zs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0l4lII78Kn ++ cat /tmp/tmp.QQTD7bw6zs ++ rm /tmp/tmp.0l4lII78Kn /tmp/tmp.QQTD7bw6zs ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.23RtTAedlJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.tV6hY5MaaJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break 
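# The wait_cluster step traced here runs wait_for_running for the rs0, cfg and mongos statefulsets
# and then wait_cluster_consistency on the PSMDB resource. A simplified sketch of those helpers is
# shown below; it is an approximation of what the trace shows, not the real code: wait_pod is
# reduced to a plain `kubectl wait`, the arbiter/nonvoting/hidden checks are placed after the pod
# loop rather than interleaved with the final pod wait, the extra-member pod name is hypothetical,
# and the retry cap of 32 is an assumed reading of wait_time=32.
    wait_pod() {
        # the real helper prints progress dots; a plain readiness wait is assumed here
        kubectl wait --for=condition=Ready "pod/$1" --timeout=600s
    }

    wait_for_running() {
        local name=$1 last_pod=$(( $2 - 1 ))
        local rs_name=${name##*-} cluster_name=${name%-*}
        for i in $(seq 0 "${last_pod}"); do
            wait_pod "${name}-${i}"
        done
        # extra replica-set members are only waited for when the CR enables them
        local member enabled
        for member in arbiter nonvoting hidden; do
            enabled=$(kubectl get psmdb "${cluster_name}" \
                -o "jsonpath={.spec.replsets[?(@.name==\"${rs_name}\")].${member}.enabled}")
            if [ "${enabled}" == "true" ]; then
                wait_pod "${name}-${member}-0"   # hypothetical pod name for an enabled extra member
            fi
        done
    }

    wait_cluster_consistency() {
        # poll the custom resource until the operator reports state "ready"
        local retry=0
        until [ "$(kubectl get psmdb "$1" -o 'jsonpath={.status.state}')" == "ready" ]; do
            sleep 7
            retry=$(( retry + 1 ))
            [ "${retry}" -ge 32 ] && return 1
        done
    }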
++ cat /tmp/tmp.23RtTAedlJ ++ cat /tmp/tmp.tV6hY5MaaJ ++ rm /tmp/tmp.23RtTAedlJ /tmp/tmp.tV6hY5MaaJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................................................................................................................................... + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ymdIQVW2TL +++ mktemp ++ local LAST_ERR=/tmp/tmp.ixMW1JWBvf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ymdIQVW2TL ++ cat /tmp/tmp.ixMW1JWBvf ++ rm /tmp/tmp.ymdIQVW2TL /tmp/tmp.ixMW1JWBvf ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vZAHCPfLqf +++ mktemp ++ local LAST_ERR=/tmp/tmp.p2XT1i19j0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vZAHCPfLqf ++ cat /tmp/tmp.p2XT1i19j0 ++ rm /tmp/tmp.vZAHCPfLqf /tmp/tmp.p2XT1i19j0 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oGr4dp5SGn +++ mktemp ++ local LAST_ERR=/tmp/tmp.wc6tyAIdqi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oGr4dp5SGn ++ cat /tmp/tmp.wc6tyAIdqi ++ rm /tmp/tmp.oGr4dp5SGn /tmp/tmp.wc6tyAIdqi ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5VN6C5araV +++ mktemp ++ local LAST_ERR=/tmp/tmp.jasWchhDc4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5VN6C5araV ++ cat /tmp/tmp.jasWchhDc4 ++ rm /tmp/tmp.5VN6C5araV /tmp/tmp.jasWchhDc4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AEcpQBRja8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.EtxgXRvZhB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AEcpQBRja8 ++ cat /tmp/tmp.EtxgXRvZhB ++ rm /tmp/tmp.AEcpQBRja8 /tmp/tmp.EtxgXRvZhB ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2sUPW2Zi5z +++ mktemp ++ local LAST_ERR=/tmp/tmp.4G2QSpWq8S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2sUPW2Zi5z ++ cat /tmp/tmp.4G2QSpWq8S ++ rm /tmp/tmp.2sUPW2Zi5z /tmp/tmp.4G2QSpWq8S ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tRBRhO1cm9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MzLESqD0xX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tRBRhO1cm9 ++ cat /tmp/tmp.MzLESqD0xX ++ rm /tmp/tmp.tRBRhO1cm9 /tmp/tmp.MzLESqD0xX ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 3 statefulset some-name-rs0 + local generation=3 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mWiCmGf2Us +++ mktemp ++ local LAST_ERR=/tmp/tmp.dNDg9eLrOI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mWiCmGf2Us ++ cat /tmp/tmp.dNDg9eLrOI ++ rm /tmp/tmp.mWiCmGf2Us /tmp/tmp.dNDg9eLrOI ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + compare_generation 3 statefulset some-name-cfg + local generation=3 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get 
statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QonXOH00tf +++ mktemp ++ local LAST_ERR=/tmp/tmp.FCrjsmZJdy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QonXOH00tf ++ cat /tmp/tmp.FCrjsmZJdy ++ rm /tmp/tmp.QonXOH00tf /tmp/tmp.FCrjsmZJdy ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/some-name-rs0 -1201 + local resource=service/some-name-rs0 + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201.yml + local new_result=/tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201-oc.yml ']' + kubectl_bin get -o yaml service/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.nnOz5mPpNl ++ mktemp + local LAST_ERR=/tmp/tmp.NgDz5q1of8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nnOz5mPpNl + cat /tmp/tmp.NgDz5q1of8 + rm /tmp/tmp.nnOz5mPpNl /tmp/tmp.NgDz5q1of8 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1201.yml /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + log 'compare_kubectl: service/some-name-rs0 OK' + set +o xtrace [2025-12-18T20:35:13+0000] compare_kubectl: service/some-name-rs0 OK + compare_kubectl service/some-name-cfg -1201 + local resource=service/some-name-cfg + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201.yml + local new_result=/tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201-oc.yml ']' + kubectl_bin get -o yaml service/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ZqqwYCmnWN ++ mktemp + local LAST_ERR=/tmp/tmp.u6baM1MwkF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZqqwYCmnWN + cat /tmp/tmp.u6baM1MwkF + rm /tmp/tmp.ZqqwYCmnWN /tmp/tmp.u6baM1MwkF + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1201.yml /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + log 'compare_kubectl: service/some-name-cfg OK' + set +o xtrace [2025-12-18T20:35:14+0000] compare_kubectl: service/some-name-cfg OK + compare_kubectl statefulset/some-name-rs0 -1201 + local resource=statefulset/some-name-rs0 + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201.yml + local new_result=/tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.uEPIHq9bDj ++ mktemp + local LAST_ERR=/tmp/tmp.E5Lo2CK2zh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uEPIHq9bDj + cat /tmp/tmp.E5Lo2CK2zh + rm /tmp/tmp.uEPIHq9bDj /tmp/tmp.E5Lo2CK2zh + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1201.yml /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-18T20:35:15+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg -1201 + local resource=statefulset/some-name-cfg + local postfix=-1201 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201.yml + local new_result=/tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Kf73qt9usW ++ mktemp + local LAST_ERR=/tmp/tmp.9n9cVk4Gr1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Kf73qt9usW + cat /tmp/tmp.9n9cVk4Gr1 + rm /tmp/tmp.Kf73qt9usW /tmp/tmp.9n9cVk4Gr1 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1201.yml /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-18T20:35:16+0000] compare_kubectl: statefulset/some-name-cfg OK + desc 'test 1.21.1' + set +o xtrace ----------------------------------------------------------------------------------- test 1.21.1 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"crVersion":"1.21.1"} }' ++ mktemp + local LAST_OUT=/tmp/tmp.hqWNCuOnTz ++ mktemp + local LAST_ERR=/tmp/tmp.Sup8C8xNYk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"crVersion":"1.21.1"} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hqWNCuOnTz perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.Sup8C8xNYk + rm /tmp/tmp.hqWNCuOnTz /tmp/tmp.Sup8C8xNYk + return 0 + sleep 20 + desc 'check if Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod started ----------------------------------------------------------------------------------- + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DFlOOGzwvt +++ mktemp ++ local LAST_ERR=/tmp/tmp.FBtgmf5R7T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DFlOOGzwvt ++ cat /tmp/tmp.FBtgmf5R7T ++ rm /tmp/tmp.DFlOOGzwvt 
/tmp/tmp.FBtgmf5R7T ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WogivyIGv0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gDTQtrDSrH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WogivyIGv0 ++ cat /tmp/tmp.gDTQtrDSrH ++ rm /tmp/tmp.WogivyIGv0 /tmp/tmp.gDTQtrDSrH ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.COPqp3GRTp +++ mktemp ++ local LAST_ERR=/tmp/tmp.7dmcCoVn4g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.COPqp3GRTp ++ cat /tmp/tmp.7dmcCoVn4g ++ rm /tmp/tmp.COPqp3GRTp /tmp/tmp.7dmcCoVn4g ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................................................................................. + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wlqq99tXOm +++ mktemp ++ local LAST_ERR=/tmp/tmp.eGy1IPBSCm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Wlqq99tXOm ++ cat /tmp/tmp.eGy1IPBSCm ++ rm /tmp/tmp.Wlqq99tXOm /tmp/tmp.eGy1IPBSCm ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k3ZHDyh4av +++ mktemp ++ local LAST_ERR=/tmp/tmp.m1YJfYtnve ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k3ZHDyh4av ++ cat /tmp/tmp.m1YJfYtnve ++ rm /tmp/tmp.k3ZHDyh4av /tmp/tmp.m1YJfYtnve ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IkZ7QWICBS +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.KUS9117z7K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IkZ7QWICBS ++ cat /tmp/tmp.KUS9117z7K ++ rm /tmp/tmp.IkZ7QWICBS /tmp/tmp.KUS9117z7K ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qEUSo7X0Vc +++ mktemp ++ local LAST_ERR=/tmp/tmp.f8AqUi1D75 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qEUSo7X0Vc ++ cat /tmp/tmp.f8AqUi1D75 ++ rm /tmp/tmp.qEUSo7X0Vc /tmp/tmp.f8AqUi1D75 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VvSTEMqvzR +++ mktemp ++ local LAST_ERR=/tmp/tmp.G0K0jjGsih ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VvSTEMqvzR ++ cat /tmp/tmp.G0K0jjGsih ++ rm /tmp/tmp.VvSTEMqvzR /tmp/tmp.G0K0jjGsih ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HDQvjBinEm +++ mktemp ++ local LAST_ERR=/tmp/tmp.JAKjo5KBCO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HDQvjBinEm ++ cat /tmp/tmp.JAKjo5KBCO ++ rm /tmp/tmp.HDQvjBinEm /tmp/tmp.JAKjo5KBCO ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EGTQxjam2C +++ mktemp ++ local LAST_ERR=/tmp/tmp.WxYmyphiE7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 
'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EGTQxjam2C ++ cat /tmp/tmp.WxYmyphiE7 ++ rm /tmp/tmp.EGTQxjam2C /tmp/tmp.WxYmyphiE7 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 4 statefulset some-name-rs0 + local generation=4 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N9EA4S6juM +++ mktemp ++ local LAST_ERR=/tmp/tmp.QeWyxhfKF9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N9EA4S6juM ++ cat /tmp/tmp.QeWyxhfKF9 ++ rm /tmp/tmp.N9EA4S6juM /tmp/tmp.QeWyxhfKF9 ++ return 0 + current_generation=4 + [[ 4 != \4 ]] + compare_generation 4 statefulset some-name-cfg + local generation=4 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ex2AyJRcb0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.V925RvwLUV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ex2AyJRcb0 ++ cat /tmp/tmp.V925RvwLUV ++ rm /tmp/tmp.ex2AyJRcb0 /tmp/tmp.V925RvwLUV ++ return 0 + current_generation=4 + [[ 4 != \4 ]] + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace 
----------------------------------------------------------------------------------- renew some-name-ssl ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rh2SvyVOM2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8PwZPfiuXx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rh2SvyVOM2 ++ cat /tmp/tmp.8PwZPfiuXx ++ rm /tmp/tmp.rh2SvyVOM2 /tmp/tmp.8PwZPfiuXx ++ return 0 + pod_name=cmctl-7c7f6bf77d-4qbg8 + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QwBfqhHlkA +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y92IUNTvY5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QwBfqhHlkA ++ cat /tmp/tmp.Y92IUNTvY5 ++ rm /tmp/tmp.QwBfqhHlkA /tmp/tmp.Y92IUNTvY5 ++ return 0 + revision=2 + kubectl_bin exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.vwYvSjRugx ++ mktemp + local LAST_ERR=/tmp/tmp.fV19Btix8v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vwYvSjRugx Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-18466/some-name-ssl + cat /tmp/tmp.fV19Btix8v + rm /tmp/tmp.vwYvSjRugx /tmp/tmp.fV19Btix8v + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bVaOWsv9Od +++ mktemp ++ local LAST_ERR=/tmp/tmp.CsF7gdNKfD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bVaOWsv9Od ++ cat /tmp/tmp.CsF7gdNKfD ++ rm /tmp/tmp.bVaOWsv9Od /tmp/tmp.CsF7gdNKfD ++ return 0 + new_revision=3 + '[' 3 == 3 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.clsvWKqeCK +++ mktemp ++ local LAST_ERR=/tmp/tmp.PdN4nU3MXW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ 
break ++ cat /tmp/tmp.clsvWKqeCK ++ cat /tmp/tmp.PdN4nU3MXW ++ rm /tmp/tmp.clsvWKqeCK /tmp/tmp.PdN4nU3MXW ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D5xUlYc0gV +++ mktemp ++ local LAST_ERR=/tmp/tmp.UXkapjRXqJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D5xUlYc0gV ++ cat /tmp/tmp.UXkapjRXqJ ++ rm /tmp/tmp.D5xUlYc0gV /tmp/tmp.UXkapjRXqJ ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iH6hEd7WSN +++ mktemp ++ local LAST_ERR=/tmp/tmp.eKQaKw7Iah ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iH6hEd7WSN ++ cat /tmp/tmp.eKQaKw7Iah ++ rm /tmp/tmp.iH6hEd7WSN /tmp/tmp.eKQaKw7Iah ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................................................................................................... + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ncRpjo4QE3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5f8lSIveEM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ncRpjo4QE3 ++ cat /tmp/tmp.5f8lSIveEM ++ rm /tmp/tmp.ncRpjo4QE3 /tmp/tmp.5f8lSIveEM ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IGvZWkbEeM +++ mktemp ++ local LAST_ERR=/tmp/tmp.oqH3LAV93p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IGvZWkbEeM ++ cat /tmp/tmp.oqH3LAV93p ++ rm /tmp/tmp.IGvZWkbEeM /tmp/tmp.oqH3LAV93p ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f5JtPw7uhg +++ mktemp ++ local LAST_ERR=/tmp/tmp.qGJsLfKNw5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f5JtPw7uhg ++ cat /tmp/tmp.qGJsLfKNw5 ++ rm /tmp/tmp.f5JtPw7uhg /tmp/tmp.qGJsLfKNw5 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.seXYgSB3ZI +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZoD12L7V0R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.seXYgSB3ZI ++ cat /tmp/tmp.ZoD12L7V0R ++ rm /tmp/tmp.seXYgSB3ZI /tmp/tmp.ZoD12L7V0R ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sDn8WSC6R8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bgImsbEaAY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sDn8WSC6R8 ++ cat /tmp/tmp.bgImsbEaAY ++ rm /tmp/tmp.sDn8WSC6R8 /tmp/tmp.bgImsbEaAY ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gKgZa2UKQI +++ mktemp ++ local LAST_ERR=/tmp/tmp.hXW5I9MT1f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gKgZa2UKQI ++ cat /tmp/tmp.hXW5I9MT1f ++ rm /tmp/tmp.gKgZa2UKQI /tmp/tmp.hXW5I9MT1f ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XNoezBKRiQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.RmCAO3xH39 ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XNoezBKRiQ ++ cat /tmp/tmp.RmCAO3xH39 ++ rm /tmp/tmp.XNoezBKRiQ /tmp/tmp.RmCAO3xH39 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 5 statefulset some-name-rs0 + local generation=5 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e8A49XVYVG +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q57n37KF43 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e8A49XVYVG ++ cat /tmp/tmp.Q57n37KF43 ++ rm /tmp/tmp.e8A49XVYVG /tmp/tmp.Q57n37KF43 ++ return 0 + current_generation=5 + [[ 5 != \5 ]] + compare_generation 5 statefulset some-name-cfg + local generation=5 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gBgwouRQeQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.6F4FSWmMlk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gBgwouRQeQ ++ cat /tmp/tmp.6F4FSWmMlk ++ rm /tmp/tmp.gBgwouRQeQ /tmp/tmp.6F4FSWmMlk ++ return 0 + current_generation=5 + [[ 5 != \5 ]] + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s 
certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P7VkquSSg7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jEAQXv6rbu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P7VkquSSg7 ++ cat /tmp/tmp.jEAQXv6rbu ++ rm /tmp/tmp.P7VkquSSg7 /tmp/tmp.jEAQXv6rbu ++ return 0 + pod_name=cmctl-7c7f6bf77d-4qbg8 + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4tXLaxahvD +++ mktemp ++ local LAST_ERR=/tmp/tmp.xD04wa29xP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4tXLaxahvD ++ cat /tmp/tmp.xD04wa29xP ++ rm /tmp/tmp.4tXLaxahvD /tmp/tmp.xD04wa29xP ++ return 0 + revision=2 + kubectl_bin exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.tRCfVkphzU ++ mktemp + local LAST_ERR=/tmp/tmp.5g7Jfiy2AE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tRCfVkphzU Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-18466/some-name-ssl-internal + cat /tmp/tmp.5g7Jfiy2AE + rm /tmp/tmp.tRCfVkphzU /tmp/tmp.5g7Jfiy2AE + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uMdtnoiYji +++ mktemp ++ local LAST_ERR=/tmp/tmp.cJVQxND81V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uMdtnoiYji ++ cat /tmp/tmp.cJVQxND81V ++ rm /tmp/tmp.uMdtnoiYji /tmp/tmp.cJVQxND81V ++ return 0 + new_revision=2 + '[' 3 == 2 ']' + sleep 1 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dfUY5V3fji +++ mktemp ++ local LAST_ERR=/tmp/tmp.esQRVy8mny ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dfUY5V3fji ++ cat /tmp/tmp.esQRVy8mny ++ rm /tmp/tmp.dfUY5V3fji /tmp/tmp.esQRVy8mny ++ return 0 + new_revision=3 + '[' 3 == 3 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + 
local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tFYkcHnTJO +++ mktemp ++ local LAST_ERR=/tmp/tmp.N48JXLEhRk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tFYkcHnTJO ++ cat /tmp/tmp.N48JXLEhRk ++ rm /tmp/tmp.tFYkcHnTJO /tmp/tmp.N48JXLEhRk ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hT6rvtITLD +++ mktemp ++ local LAST_ERR=/tmp/tmp.dtSi6o5jVl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hT6rvtITLD ++ cat /tmp/tmp.dtSi6o5jVl ++ rm /tmp/tmp.hT6rvtITLD /tmp/tmp.dtSi6o5jVl ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T5P36Agz8H +++ mktemp ++ local LAST_ERR=/tmp/tmp.WYdJX8HFvx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T5P36Agz8H ++ cat /tmp/tmp.WYdJX8HFvx ++ rm /tmp/tmp.T5P36Agz8H /tmp/tmp.WYdJX8HFvx ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................................................................................................................................. 
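The renewal sequence traced above (first for some-name-ssl, then for some-name-ssl-internal) follows the same pattern each time: record the certificate's .status.revision, trigger re-issuance through the cmctl helper pod, then poll until cert-manager bumps the revision. A hedged reconstruction of that helper, based only on the commands visible in this log (the function name and error handling are assumptions):

renew_certificate() {
    local certificate="$1"
    wait_certificate "${certificate}"

    # locate the helper pod that carries the cmctl binary
    local pod_name
    pod_name=$(kubectl get pods --selector=name=cmctl -o jsonpath='{.items[].metadata.name}')

    # remember the current revision so the re-issuance can be detected
    local revision
    revision=$(kubectl get certificate "${certificate}" -o jsonpath='{.status.revision}')

    # manually trigger issuance, matching the "Manually triggered issuance of Certificate" messages above
    kubectl exec "${pod_name}" -- /tmp/cmctl renew "${certificate}"

    # poll until cert-manager increments .status.revision (the trace shows e.g. revision 2 -> 3)
    for i in {1..10}; do
        local new_revision
        new_revision=$(kubectl get certificate "${certificate}" -o jsonpath='{.status.revision}')
        if [ $((revision + 1)) == "${new_revision}" ]; then
            break
        fi
        sleep 1
    done
}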
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i1ZJrF4h78 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4dJhufyjfr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i1ZJrF4h78 ++ cat /tmp/tmp.4dJhufyjfr ++ rm /tmp/tmp.i1ZJrF4h78 /tmp/tmp.4dJhufyjfr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2MeFw7ROAW +++ mktemp ++ local LAST_ERR=/tmp/tmp.60L27q64oD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2MeFw7ROAW ++ cat /tmp/tmp.60L27q64oD ++ rm /tmp/tmp.2MeFw7ROAW /tmp/tmp.60L27q64oD ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Mfqpl76HRd +++ mktemp ++ local LAST_ERR=/tmp/tmp.iB70SsXcsl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Mfqpl76HRd ++ cat /tmp/tmp.iB70SsXcsl ++ rm /tmp/tmp.Mfqpl76HRd /tmp/tmp.iB70SsXcsl ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Nm9hBxFGrV +++ mktemp ++ local LAST_ERR=/tmp/tmp.pgQZC9di1V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Nm9hBxFGrV ++ cat /tmp/tmp.pgQZC9di1V ++ rm /tmp/tmp.Nm9hBxFGrV /tmp/tmp.pgQZC9di1V ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8IqbaAYoYI +++ mktemp ++ local LAST_ERR=/tmp/tmp.GLsePQc8Ju ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8IqbaAYoYI ++ cat /tmp/tmp.GLsePQc8Ju ++ rm /tmp/tmp.8IqbaAYoYI /tmp/tmp.GLsePQc8Ju ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aONvmbvVYd +++ mktemp ++ local LAST_ERR=/tmp/tmp.TQJasYXNtx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aONvmbvVYd ++ cat /tmp/tmp.TQJasYXNtx ++ rm /tmp/tmp.aONvmbvVYd /tmp/tmp.TQJasYXNtx ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3WlCFlNx8X +++ mktemp ++ local LAST_ERR=/tmp/tmp.P2y1jGlYWE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3WlCFlNx8X ++ cat /tmp/tmp.P2y1jGlYWE ++ rm /tmp/tmp.3WlCFlNx8X /tmp/tmp.P2y1jGlYWE ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 6 statefulset some-name-rs0 + local generation=6 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tlcWSCCkX0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zQzSUcAQ5K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tlcWSCCkX0 ++ cat /tmp/tmp.zQzSUcAQ5K ++ rm /tmp/tmp.tlcWSCCkX0 /tmp/tmp.zQzSUcAQ5K ++ return 0 + current_generation=6 + [[ 6 != \6 ]] + compare_generation 6 statefulset some-name-cfg + local generation=6 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rmf2TTKVED +++ mktemp ++ local LAST_ERR=/tmp/tmp.FYeTlOd1Cv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 
0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rmf2TTKVED ++ cat /tmp/tmp.FYeTlOd1Cv ++ rm /tmp/tmp.rmf2TTKVED /tmp/tmp.FYeTlOd1Cv ++ return 0 + current_generation=6 + [[ 6 != \6 ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/some-name-rs0 -1211 + local resource=service/some-name-rs0 + local postfix=-1211 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1211.yml + local new_result=/tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1211-oc.yml ']' + kubectl_bin get -o yaml service/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.5i0cEAtc56 ++ mktemp + local LAST_ERR=/tmp/tmp.7bCrjGQBDl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5i0cEAtc56 + cat /tmp/tmp.7bCrjGQBDl + rm /tmp/tmp.5i0cEAtc56 /tmp/tmp.7bCrjGQBDl + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1211.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1211.yml /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + log 'compare_kubectl: service/some-name-rs0 OK' + set +o xtrace [2025-12-18T20:52:21+0000] compare_kubectl: service/some-name-rs0 OK + compare_kubectl service/some-name-cfg -1211 + local resource=service/some-name-cfg + local postfix=-1211 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1211.yml + local new_result=/tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1211-oc.yml ']' + kubectl_bin get -o yaml service/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.QyUegbhALI ++ mktemp + local LAST_ERR=/tmp/tmp.KW301wGmE4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QyUegbhALI + cat /tmp/tmp.KW301wGmE4 + rm /tmp/tmp.QyUegbhALI /tmp/tmp.KW301wGmE4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1211.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1211.yml /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + log 'compare_kubectl: service/some-name-cfg OK' + set +o xtrace [2025-12-18T20:52:22+0000] compare_kubectl: service/some-name-cfg OK + compare_kubectl statefulset/some-name-rs0 -1211 + local resource=statefulset/some-name-rs0 + local postfix=-1211 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1211.yml + local new_result=/tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1211-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vP9lU2qvim ++ mktemp + local LAST_ERR=/tmp/tmp.d6sU6JeTUb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vP9lU2qvim + cat /tmp/tmp.d6sU6JeTUb + rm /tmp/tmp.vP9lU2qvim /tmp/tmp.d6sU6JeTUb + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1211.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1211.yml /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-18T20:52:22+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg -1211 + local resource=statefulset/some-name-cfg + local postfix=-1211 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1211.yml + local new_result=/tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1211-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.9g9cEf6G8f ++ mktemp + local LAST_ERR=/tmp/tmp.03dKGn5YpM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9g9cEf6G8f + cat /tmp/tmp.03dKGn5YpM + rm /tmp/tmp.9g9cEf6G8f /tmp/tmp.03dKGn5YpM + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1211.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1211.yml /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-18T20:52:23+0000] compare_kubectl: statefulset/some-name-cfg OK + desc 'test 1.22.0' + set +o xtrace ----------------------------------------------------------------------------------- test 1.22.0 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=merge --patch '{ "spec": {"crVersion":"1.22.0"} }' ++ mktemp + local LAST_OUT=/tmp/tmp.B36TfznhYD ++ mktemp + local LAST_ERR=/tmp/tmp.lCQmJXqnHE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=merge --patch '{ "spec": {"crVersion":"1.22.0"} }' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B36TfznhYD perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.lCQmJXqnHE + rm /tmp/tmp.B36TfznhYD /tmp/tmp.lCQmJXqnHE + return 0 + sleep 20 + desc 'check if Pod started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod started ----------------------------------------------------------------------------------- + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2xHBcMNITa +++ mktemp ++ local LAST_ERR=/tmp/tmp.gEEcRNjbNh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2xHBcMNITa ++ cat /tmp/tmp.gEEcRNjbNh ++ rm /tmp/tmp.2xHBcMNITa 
/tmp/tmp.gEEcRNjbNh ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cBElxezvta +++ mktemp ++ local LAST_ERR=/tmp/tmp.pSf5Em5Ocy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cBElxezvta ++ cat /tmp/tmp.pSf5Em5Ocy ++ rm /tmp/tmp.cBElxezvta /tmp/tmp.pSf5Em5Ocy ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HbN9KU3cHP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tgot4QE3Ie ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HbN9KU3cHP ++ cat /tmp/tmp.Tgot4QE3Ie ++ rm /tmp/tmp.HbN9KU3cHP /tmp/tmp.Tgot4QE3Ie ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.If6yiLA4pQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.YexQXUcuQD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.If6yiLA4pQ ++ cat /tmp/tmp.YexQXUcuQD ++ rm /tmp/tmp.If6yiLA4pQ /tmp/tmp.YexQXUcuQD ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ojfGPyUIPf +++ mktemp ++ local LAST_ERR=/tmp/tmp.NHn808e1X5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ojfGPyUIPf ++ cat /tmp/tmp.NHn808e1X5 ++ rm /tmp/tmp.ojfGPyUIPf /tmp/tmp.NHn808e1X5 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KtgqVZWY3N +++ mktemp ++ local LAST_ERR=/tmp/tmp.HTxjtdBWlQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in 
$(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KtgqVZWY3N ++ cat /tmp/tmp.HTxjtdBWlQ ++ rm /tmp/tmp.KtgqVZWY3N /tmp/tmp.HTxjtdBWlQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TbC0SZB42i +++ mktemp ++ local LAST_ERR=/tmp/tmp.xj2KPvyuKu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TbC0SZB42i ++ cat /tmp/tmp.xj2KPvyuKu ++ rm /tmp/tmp.TbC0SZB42i /tmp/tmp.xj2KPvyuKu ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ze3e4kJBMz +++ mktemp ++ local LAST_ERR=/tmp/tmp.OlcGgiF1DU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ze3e4kJBMz ++ cat /tmp/tmp.OlcGgiF1DU ++ rm /tmp/tmp.Ze3e4kJBMz /tmp/tmp.OlcGgiF1DU ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZoloGmWNRr +++ mktemp ++ local LAST_ERR=/tmp/tmp.8vTrSMI3fU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZoloGmWNRr ++ cat /tmp/tmp.8vTrSMI3fU ++ rm /tmp/tmp.ZoloGmWNRr /tmp/tmp.8vTrSMI3fU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.izjUeYGuBL +++ mktemp ++ local LAST_ERR=/tmp/tmp.mejhGkqYLv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.izjUeYGuBL ++ cat /tmp/tmp.mejhGkqYLv ++ rm /tmp/tmp.izjUeYGuBL /tmp/tmp.mejhGkqYLv ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 6 statefulset some-name-rs0 + local generation=6 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mdRLKfOKe5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1BXPIbEXff ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mdRLKfOKe5 ++ cat /tmp/tmp.1BXPIbEXff ++ rm /tmp/tmp.mdRLKfOKe5 /tmp/tmp.1BXPIbEXff ++ return 0 + current_generation=6 + [[ 6 != \6 ]] + compare_generation 6 statefulset some-name-cfg + local generation=6 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wPPhvcq2tx +++ mktemp ++ local LAST_ERR=/tmp/tmp.jy0pGgKnpR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wPPhvcq2tx ++ cat /tmp/tmp.jy0pGgKnpR ++ rm /tmp/tmp.wPPhvcq2tx /tmp/tmp.jy0pGgKnpR ++ return 0 + current_generation=6 + [[ 6 != \6 ]] + renew_certificate some-name-ssl + certificate=some-name-ssl + wait_certificate some-name-ssl + certificate=some-name-ssl + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl --timeout=60s certificate.cert-manager.io/some-name-ssl condition met + sleep 1 + desc 'renew some-name-ssl' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl 
----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.R6lkIULA0w +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nt8V51iDKN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.R6lkIULA0w ++ cat /tmp/tmp.Nt8V51iDKN ++ rm /tmp/tmp.R6lkIULA0w /tmp/tmp.Nt8V51iDKN ++ return 0 + pod_name=cmctl-7c7f6bf77d-4qbg8 + local revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2SuXAqq60p +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xmgzd1r8fa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2SuXAqq60p ++ cat /tmp/tmp.Xmgzd1r8fa ++ rm /tmp/tmp.2SuXAqq60p /tmp/tmp.Xmgzd1r8fa ++ return 0 + revision=3 + kubectl_bin exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl ++ mktemp + local LAST_OUT=/tmp/tmp.8YPGKCLOqG ++ mktemp + local LAST_ERR=/tmp/tmp.JrddJ5ULYS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8YPGKCLOqG Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-18466/some-name-ssl + cat /tmp/tmp.JrddJ5ULYS + rm /tmp/tmp.8YPGKCLOqG /tmp/tmp.JrddJ5ULYS + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3tlV0EQAPg +++ mktemp ++ local LAST_ERR=/tmp/tmp.z9ibT4NVre ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3tlV0EQAPg ++ cat /tmp/tmp.z9ibT4NVre ++ rm /tmp/tmp.3tlV0EQAPg /tmp/tmp.z9ibT4NVre ++ return 0 + new_revision=3 + '[' 4 == 3 ']' + sleep 1 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6jcMfIiSTj +++ mktemp ++ local LAST_ERR=/tmp/tmp.iOxrRzZU3q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6jcMfIiSTj ++ cat /tmp/tmp.iOxrRzZU3q ++ rm /tmp/tmp.6jcMfIiSTj /tmp/tmp.iOxrRzZU3q ++ return 0 + new_revision=4 + '[' 4 == 4 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be 
ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wvk7UQxwGP +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rls3UypllL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wvk7UQxwGP ++ cat /tmp/tmp.Rls3UypllL ++ rm /tmp/tmp.wvk7UQxwGP /tmp/tmp.Rls3UypllL ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2awCFD9i5M +++ mktemp ++ local LAST_ERR=/tmp/tmp.SSzI6vgdZV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2awCFD9i5M ++ cat /tmp/tmp.SSzI6vgdZV ++ rm /tmp/tmp.2awCFD9i5M /tmp/tmp.SSzI6vgdZV ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CEjC5307zd +++ mktemp ++ local LAST_ERR=/tmp/tmp.6I8FByDvLB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CEjC5307zd ++ cat /tmp/tmp.6I8FByDvLB ++ rm /tmp/tmp.CEjC5307zd /tmp/tmp.6I8FByDvLB ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................................................................................................. 
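Note on the steps traced above: the test records the Certificate's .status.revision, triggers a renewal with cmctl from a helper pod, polls until the revision increments, and then waits for every replica-set pod to return to Ready. The sketch below is a minimal, condensed reconstruction of that flow based only on the commands visible in this trace; the function name renew_and_wait, the 300s timeout, and the hard-coded pod lists are illustrative assumptions, not the harness's actual helpers (which live in the repo's e2e-tests and may differ).

    # Hypothetical condensed form of the renew_certificate / wait_for_running
    # pattern seen in the trace above (sketch only, not the real test source).
    renew_and_wait() {
        local cert="$1" cmctl_pod="$2"      # e.g. some-name-ssl, cmctl-7c7f6bf77d-4qbg8
        local before after
        before=$(kubectl get certificate "$cert" -o 'jsonpath={.status.revision}')
        kubectl exec "$cmctl_pod" -- /tmp/cmctl renew "$cert"
        for i in {1..10}; do                # poll until cert-manager bumps the revision
            after=$(kubectl get certificate "$cert" -o 'jsonpath={.status.revision}')
            [[ ${after:-0} -gt ${before:-0} ]] && break
            sleep 1
        done
        sleep 20                            # give the operator time to roll the pods
        for sts in some-name-rs0 some-name-cfg some-name-mongos; do
            for idx in 0 1 2; do            # each component runs three pods in this test
                kubectl wait --for=condition=Ready "pod/$sts-$idx" --timeout=300s
            done
        done
    }

Checking the revision bump (rather than only re-running kubectl wait on the Certificate) is what distinguishes an actual re-issuance from a certificate that simply stayed Ready, which appears to be why the trace reads .status.revision before and after the cmctl call.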
+ wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nItFE5Zn5Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.phWaoq29F7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nItFE5Zn5Q ++ cat /tmp/tmp.phWaoq29F7 ++ rm /tmp/tmp.nItFE5Zn5Q /tmp/tmp.phWaoq29F7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NcRMFVcNzY +++ mktemp ++ local LAST_ERR=/tmp/tmp.JMlU2vC7mM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NcRMFVcNzY ++ cat /tmp/tmp.JMlU2vC7mM ++ rm /tmp/tmp.NcRMFVcNzY /tmp/tmp.JMlU2vC7mM ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VVhRrW85CC +++ mktemp ++ local LAST_ERR=/tmp/tmp.rcFR193fL8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VVhRrW85CC ++ cat /tmp/tmp.rcFR193fL8 ++ rm /tmp/tmp.VVhRrW85CC /tmp/tmp.rcFR193fL8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dnfuY709lx +++ mktemp ++ local LAST_ERR=/tmp/tmp.NBkJ34d0Cf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dnfuY709lx ++ cat /tmp/tmp.NBkJ34d0Cf ++ rm /tmp/tmp.dnfuY709lx /tmp/tmp.NBkJ34d0Cf ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UdIlSvrfDX +++ mktemp ++ local LAST_ERR=/tmp/tmp.11ZPy1aUAo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UdIlSvrfDX ++ cat /tmp/tmp.11ZPy1aUAo ++ rm /tmp/tmp.UdIlSvrfDX /tmp/tmp.11ZPy1aUAo ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K2KjMk262P +++ mktemp ++ local LAST_ERR=/tmp/tmp.QPmm2BU6Tn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K2KjMk262P ++ cat /tmp/tmp.QPmm2BU6Tn ++ rm /tmp/tmp.K2KjMk262P /tmp/tmp.QPmm2BU6Tn ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qduP5Up0m7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8eol65QCLu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qduP5Up0m7 ++ cat /tmp/tmp.8eol65QCLu ++ rm /tmp/tmp.qduP5Up0m7 /tmp/tmp.8eol65QCLu ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 7 statefulset some-name-rs0 + local generation=7 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HAUgJFP3tK +++ mktemp ++ local LAST_ERR=/tmp/tmp.BmbofhNCF1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HAUgJFP3tK ++ cat /tmp/tmp.BmbofhNCF1 ++ rm /tmp/tmp.HAUgJFP3tK /tmp/tmp.BmbofhNCF1 ++ return 0 + current_generation=7 + [[ 7 != \7 ]] + compare_generation 7 statefulset some-name-cfg + local generation=7 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BkqBrRyoor +++ mktemp ++ local LAST_ERR=/tmp/tmp.LjMEPJ6S68 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 
0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BkqBrRyoor ++ cat /tmp/tmp.LjMEPJ6S68 ++ rm /tmp/tmp.BkqBrRyoor /tmp/tmp.LjMEPJ6S68 ++ return 0 + current_generation=7 + [[ 7 != \7 ]] + renew_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + wait_certificate some-name-ssl-internal + certificate=some-name-ssl-internal + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + for i in {1..10} + kubectl wait --for=condition=Ready certificate/some-name-ssl-internal --timeout=60s certificate.cert-manager.io/some-name-ssl-internal condition met + sleep 1 + desc 'renew some-name-ssl-internal' + set +o xtrace ----------------------------------------------------------------------------------- renew some-name-ssl-internal ----------------------------------------------------------------------------------- + local pod_name ++ kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XrOLmQWgBM +++ mktemp ++ local LAST_ERR=/tmp/tmp.F6FDobwZYr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XrOLmQWgBM ++ cat /tmp/tmp.F6FDobwZYr ++ rm /tmp/tmp.XrOLmQWgBM /tmp/tmp.F6FDobwZYr ++ return 0 + pod_name=cmctl-7c7f6bf77d-4qbg8 + local revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6IVO4kwPab +++ mktemp ++ local LAST_ERR=/tmp/tmp.bXHnfQuuIA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6IVO4kwPab ++ cat /tmp/tmp.bXHnfQuuIA ++ rm /tmp/tmp.6IVO4kwPab /tmp/tmp.bXHnfQuuIA ++ return 0 + revision=3 + kubectl_bin exec 
cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl-internal ++ mktemp + local LAST_OUT=/tmp/tmp.2VUQ5wsn9V ++ mktemp + local LAST_ERR=/tmp/tmp.q0NOE8dRZY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec cmctl-7c7f6bf77d-4qbg8 -- /tmp/cmctl renew some-name-ssl-internal + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2VUQ5wsn9V Manually triggered issuance of Certificate upgrade-consistency-sharded-tls-18466/some-name-ssl-internal + cat /tmp/tmp.q0NOE8dRZY + rm /tmp/tmp.2VUQ5wsn9V /tmp/tmp.q0NOE8dRZY + return 0 + for i in {1..10} + local new_revision ++ kubectl_bin get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hWIu0GLG7P +++ mktemp ++ local LAST_ERR=/tmp/tmp.asefNwQoBW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get certificate some-name-ssl-internal -o 'jsonpath={.status.revision}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hWIu0GLG7P ++ cat /tmp/tmp.asefNwQoBW ++ rm /tmp/tmp.hWIu0GLG7P /tmp/tmp.asefNwQoBW ++ return 0 + new_revision=4 + '[' 4 == 4 ']' + break + sleep 20 + wait_cluster + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TNxOvN3gBP +++ mktemp ++ local LAST_ERR=/tmp/tmp.uQBoc99GQ7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TNxOvN3gBP ++ cat /tmp/tmp.uQBoc99GQ7 ++ rm /tmp/tmp.TNxOvN3gBP /tmp/tmp.uQBoc99GQ7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QZ2lLwzVNx +++ mktemp ++ local LAST_ERR=/tmp/tmp.skK32gctie ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QZ2lLwzVNx ++ cat /tmp/tmp.skK32gctie ++ rm /tmp/tmp.QZ2lLwzVNx /tmp/tmp.skK32gctie ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NhS1HwOJXL +++ mktemp ++ local LAST_ERR=/tmp/tmp.rRucajiz7g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break 
++ cat /tmp/tmp.NhS1HwOJXL ++ cat /tmp/tmp.rRucajiz7g ++ rm /tmp/tmp.NhS1HwOJXL /tmp/tmp.rRucajiz7g ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................................................................................................ + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ErvtupbcDU +++ mktemp ++ local LAST_ERR=/tmp/tmp.vd2XlTHjwY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ErvtupbcDU ++ cat /tmp/tmp.vd2XlTHjwY ++ rm /tmp/tmp.ErvtupbcDU /tmp/tmp.vd2XlTHjwY ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VdlFyAiVHK +++ mktemp ++ local LAST_ERR=/tmp/tmp.DpFwmd6WLL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VdlFyAiVHK ++ cat /tmp/tmp.DpFwmd6WLL ++ rm /tmp/tmp.VdlFyAiVHK /tmp/tmp.DpFwmd6WLL ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BbQUMhqMNM +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Xb8N7oNoi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BbQUMhqMNM ++ cat /tmp/tmp.2Xb8N7oNoi ++ rm /tmp/tmp.BbQUMhqMNM /tmp/tmp.2Xb8N7oNoi ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7QYgevUPGk +++ mktemp ++ local LAST_ERR=/tmp/tmp.sXtDfbKw35 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7QYgevUPGk ++ cat /tmp/tmp.sXtDfbKw35 ++ rm /tmp/tmp.7QYgevUPGk /tmp/tmp.sXtDfbKw35 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xav93RGNiM +++ mktemp ++ local LAST_ERR=/tmp/tmp.Al75J0195E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xav93RGNiM ++ cat /tmp/tmp.Al75J0195E ++ rm /tmp/tmp.xav93RGNiM /tmp/tmp.Al75J0195E ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rvxvWumirI +++ mktemp ++ local LAST_ERR=/tmp/tmp.5wN8PvogPt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rvxvWumirI ++ cat /tmp/tmp.5wN8PvogPt ++ rm /tmp/tmp.rvxvWumirI /tmp/tmp.5wN8PvogPt ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Hs30DYWQG +++ mktemp ++ local LAST_ERR=/tmp/tmp.i1h6pwgFct ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0Hs30DYWQG ++ cat /tmp/tmp.i1h6pwgFct ++ rm /tmp/tmp.0Hs30DYWQG /tmp/tmp.i1h6pwgFct ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 8 statefulset some-name-rs0 + local generation=8 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lzIh9MkSUM +++ mktemp ++ local LAST_ERR=/tmp/tmp.nWPc3aIan6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lzIh9MkSUM ++ cat /tmp/tmp.nWPc3aIan6 ++ rm /tmp/tmp.lzIh9MkSUM /tmp/tmp.nWPc3aIan6 ++ return 0 + current_generation=8 + [[ 8 != \8 ]] + compare_generation 8 statefulset some-name-cfg + local generation=8 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get 
statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p1NzfdLgcn +++ mktemp ++ local LAST_ERR=/tmp/tmp.vjmNeKG3sl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p1NzfdLgcn ++ cat /tmp/tmp.vjmNeKG3sl ++ rm /tmp/tmp.p1NzfdLgcn /tmp/tmp.vjmNeKG3sl ++ return 0 + current_generation=8 + [[ 8 != \8 ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/some-name-rs0 -1220 + local resource=service/some-name-rs0 + local postfix=-1220 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1220.yml + local new_result=/tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1220-oc.yml ']' + kubectl_bin get -o yaml service/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.v9OweyvS3i ++ mktemp + local LAST_ERR=/tmp/tmp.lORR4YxE5m + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v9OweyvS3i + cat /tmp/tmp.lORR4YxE5m + rm /tmp/tmp.v9OweyvS3i /tmp/tmp.lORR4YxE5m + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1220.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1220.yml /tmp/tmp.IUO4whk7xw/service_some-name-rs0.yml + log 'compare_kubectl: service/some-name-rs0 OK' + set +o xtrace [2025-12-18T21:06:19+0000] compare_kubectl: service/some-name-rs0 OK + compare_kubectl service/some-name-cfg -1220 + local resource=service/some-name-cfg + local postfix=-1220 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1220.yml + local new_result=/tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1220-oc.yml ']' + kubectl_bin get -o yaml service/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.hTpU0DvCOd ++ mktemp + local LAST_ERR=/tmp/tmp.8KKL5QHonR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hTpU0DvCOd + cat /tmp/tmp.8KKL5QHonR + rm /tmp/tmp.hTpU0DvCOd /tmp/tmp.8KKL5QHonR + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1220.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1220.yml /tmp/tmp.IUO4whk7xw/service_some-name-cfg.yml + log 'compare_kubectl: service/some-name-cfg OK' + set +o xtrace [2025-12-18T21:06:19+0000] compare_kubectl: service/some-name-cfg OK + compare_kubectl statefulset/some-name-rs0 -1220 + local resource=statefulset/some-name-rs0 + local postfix=-1220 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1220.yml + local new_result=/tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1220-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.tNvO9k8qEt ++ mktemp + local LAST_ERR=/tmp/tmp.PXDpMH8lV8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tNvO9k8qEt + cat /tmp/tmp.PXDpMH8lV8 + rm /tmp/tmp.tNvO9k8qEt /tmp/tmp.PXDpMH8lV8 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1220.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1220.yml /tmp/tmp.IUO4whk7xw/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-18T21:06:21+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg -1220 + local resource=statefulset/some-name-cfg + local postfix=-1220 + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1220.yml + local new_result=/tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1220-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("upgrade-consistency-sharded-tls-18466", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.qt0KX1zFCY ++ mktemp + local LAST_ERR=/tmp/tmp.5F3XingnYH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qt0KX1zFCY + cat /tmp/tmp.5F3XingnYH + rm /tmp/tmp.qt0KX1zFCY /tmp/tmp.5F3XingnYH + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1220.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1220.yml /tmp/tmp.IUO4whk7xw/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-18T21:06:21+0000] compare_kubectl: statefulset/some-name-cfg OK + destroy upgrade-consistency-sharded-tls-18466 + local namespace=upgrade-consistency-sharded-tls-18466 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.q56BBEeGvQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.yrbSDLs2AU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q56BBEeGvQ ++ cat /tmp/tmp.yrbSDLs2AU No resources found in upgrade-consistency-sharded-tls-18466 namespace. 
++ rm /tmp/tmp.q56BBEeGvQ /tmp/tmp.yrbSDLs2AU ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.iOcnGHIRew ++ mktemp + local LAST_ERR=/tmp/tmp.UdltG4KdGq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iOcnGHIRew customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.UdltG4KdGq + rm /tmp/tmp.iOcnGHIRew /tmp/tmp.UdltG4KdGq + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.yIR6kUmidN ++ mktemp + local LAST_ERR=/tmp/tmp.fUEz18drVD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yIR6kUmidN + cat /tmp/tmp.fUEz18drVD + rm /tmp/tmp.yIR6kUmidN /tmp/tmp.fUEz18drVD + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.OjIbwrp2VX ++ mktemp + local LAST_ERR=/tmp/tmp.5WeSjEOF4r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.OjIbwrp2VX + cat /tmp/tmp.5WeSjEOF4r + rm /tmp/tmp.OjIbwrp2VX /tmp/tmp.5WeSjEOF4r + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch perconaservermongodbs.psmdb.percona.com -n upgrade-consistency-sharded-tls-18466 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaservermongodb.psmdb.percona.com/some-name patched + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.d1YYcyRJNc ++ mktemp + local LAST_ERR=/tmp/tmp.oNpoLzVQuH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d1YYcyRJNc customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com condition met + cat /tmp/tmp.oNpoLzVQuH + rm /tmp/tmp.d1YYcyRJNc /tmp/tmp.oNpoLzVQuH + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.0r4yJPCBPT ++ mktemp + local LAST_ERR=/tmp/tmp.lQZsHBumiC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2152/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0r4yJPCBPT clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.lQZsHBumiC + rm /tmp/tmp.0r4yJPCBPT /tmp/tmp.lQZsHBumiC + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.EYuWcuhPPG ++ mktemp + local LAST_ERR=/tmp/tmp.G8x7S1oeWu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.EYuWcuhPPG namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io 
"cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace deployment.apps "cert-manager-cainjector" deleted from cert-manager namespace deployment.apps "cert-manager" deleted from cert-manager namespace deployment.apps "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.G8x7S1oeWu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.EYuWcuhPPG namespace "cert-manager" deleted + cat /tmp/tmp.G8x7S1oeWu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.EYuWcuhPPG + cat /tmp/tmp.G8x7S1oeWu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting 
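Note: the trace above comes from the suite's retrying kubectl wrapper, which captures stdout/stderr into mktemp files and re-runs the command up to three times. Below is a minimal sketch of that pattern, assuming only what this log shows; the function and variable names are illustrative, not the suite's real identifiers.

retry_kubectl() {
    # Sketch of the capture-and-retry pattern visible in this trace (illustrative).
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        cat "$LAST_OUT"          # show the failed attempt's output, as in the log above
        cat "$LAST_ERR" >&2
        sleep $((timeout * i))   # 0s, 4s, 8s back-off, matching the sleeps in this log
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}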
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.EYuWcuhPPG + cat /tmp/tmp.G8x7S1oeWu Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.EYuWcuhPPG /tmp/tmp.G8x7S1oeWu + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace upgrade-consistency-sharded-tls-18466 + rm -rf /tmp/tmp.IUO4whk7xw + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.urEwP0aN18 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.DHzmJ41P2W ++ mktemp + local LAST_ERR=/tmp/tmp.Quxnxz0nvH + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.UORKjy9Okm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace upgrade-consistency-sharded-tls-18466 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator