++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/logs/mongod-major-upgrade-sharded.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/logs/mongod-major-upgrade-sharded.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep -eks- Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 ++ '[' v1.32.13-gke.1090000 ']' ++ GKE=1 +++ kubectl version -o json +++ /usr/sbin/sed -r 's/[^0-9.]+//g' +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.32 + set_debug + [[ 1 == 1 ]] + set -o xtrace + main + rbac=rbac + '[' -n psmdb-operator ']' + rbac=cw-rbac + create_infra mongod-major-upgrade-sharded-2121 + local ns=mongod-major-upgrade-sharded-2121 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.m0TO105Zfy ++ mktemp + local LAST_ERR=/tmp/tmp.5EXj3DD5FI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m0TO105Zfy customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.5EXj3DD5FI + rm /tmp/tmp.m0TO105Zfy /tmp/tmp.5EXj3DD5FI + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.hIfSIgjKRT ++ mktemp + local LAST_ERR=/tmp/tmp.pNtfONwVev + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set 
+e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hIfSIgjKRT + cat /tmp/tmp.pNtfONwVev + rm /tmp/tmp.hIfSIgjKRT /tmp/tmp.pNtfONwVev + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.5dpGi0DWyk ++ mktemp + local LAST_ERR=/tmp/tmp.sGQPq6NoHL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5dpGi0DWyk + cat /tmp/tmp.sGQPq6NoHL + rm /tmp/tmp.5dpGi0DWyk /tmp/tmp.sGQPq6NoHL + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.je5P0oE2m4 ++ mktemp + local LAST_ERR=/tmp/tmp.se5o6lguLe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.je5P0oE2m4 + cat /tmp/tmp.se5o6lguLe + rm /tmp/tmp.je5P0oE2m4 /tmp/tmp.se5o6lguLe + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.3P3ALHGLHt ++ mktemp + local LAST_ERR=/tmp/tmp.7ClVS1m7Tb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3P3ALHGLHt clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.7ClVS1m7Tb + rm /tmp/tmp.3P3ALHGLHt /tmp/tmp.7ClVS1m7Tb + return 0 + check_crd_for_deletion PR-2272-ae4e3cbc + local git_tag=PR-2272-ae4e3cbc ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2272-ae4e3cbc/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' ++ 
/usr/sbin/sed s/---//g + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.a39r7O8MQm +++ mktemp ++ local LAST_ERR=/tmp/tmp.O6LCfINyLX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.a39r7O8MQm ++ cat /tmp/tmp.O6LCfINyLX Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.a39r7O8MQm ++ cat /tmp/tmp.O6LCfINyLX Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.a39r7O8MQm ++ cat /tmp/tmp.O6LCfINyLX Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.a39r7O8MQm ++ cat /tmp/tmp.O6LCfINyLX Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.a39r7O8MQm /tmp/tmp.O6LCfINyLX ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old 
namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- ++ mktemp cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.mm2rQNgEF0 + local LAST_OUT=/tmp/tmp.p03zRCi0Ne ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Xp0bIRr8Ac + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.m06dr5JbMv + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mm2rQNgEF0 + cat /tmp/tmp.Xp0bIRr8Ac + rm /tmp/tmp.mm2rQNgEF0 /tmp/tmp.Xp0bIRr8Ac + return 0 namespace "mongod-major-upgrade-sharded-12284" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p03zRCi0Ne namespace "psmdb-operator" deleted + cat /tmp/tmp.m06dr5JbMv + rm /tmp/tmp.p03zRCi0Ne /tmp/tmp.m06dr5JbMv + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FWvB4u6uDq ++ mktemp + local LAST_ERR=/tmp/tmp.ciYv69WwSv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FWvB4u6uDq + cat /tmp/tmp.ciYv69WwSv + rm /tmp/tmp.FWvB4u6uDq /tmp/tmp.ciYv69WwSv + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FmJONvbZQl ++ mktemp + local LAST_ERR=/tmp/tmp.1XhRlB4MOm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FmJONvbZQl namespace/psmdb-operator created + cat /tmp/tmp.1XhRlB4MOm + rm /tmp/tmp.FmJONvbZQl /tmp/tmp.1XhRlB4MOm + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.pIUN6GzqS5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZyITTp0BP1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pIUN6GzqS5 ++ cat /tmp/tmp.ZyITTp0BP1 ++ rm /tmp/tmp.pIUN6GzqS5 /tmp/tmp.ZyITTp0BP1 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster10 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WZziej2DjA ++ mktemp + local LAST_ERR=/tmp/tmp.pipZxFLvRf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster10 
--namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WZziej2DjA Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster10" modified. + cat /tmp/tmp.pipZxFLvRf + rm /tmp/tmp.WZziej2DjA /tmp/tmp.pipZxFLvRf + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.hFffjJLoha ++ mktemp + local LAST_ERR=/tmp/tmp.uxIFqtSeVB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hFffjJLoha customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.uxIFqtSeVB + rm /tmp/tmp.hFffjJLoha /tmp/tmp.uxIFqtSeVB + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.x1PefDMA2Y ++ mktemp + local LAST_ERR=/tmp/tmp.iPjkGl4bfo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x1PefDMA2Y clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.iPjkGl4bfo + rm /tmp/tmp.x1PefDMA2Y /tmp/tmp.iPjkGl4bfo + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.CWUgh2sYiH ++ mktemp + local LAST_ERR=/tmp/tmp.maTUakUqyO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CWUgh2sYiH deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.maTUakUqyO + rm /tmp/tmp.CWUgh2sYiH /tmp/tmp.maTUakUqyO + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.zfE0433zhZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.7dU8Od3V5j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zfE0433zhZ ++ cat /tmp/tmp.7dU8Od3V5j ++ rm /tmp/tmp.zfE0433zhZ /tmp/tmp.7dU8Od3V5j ++ return 0 + wait_operator_pod percona-server-mongodb-operator-6455bb5fb4-8qqdb + local pod=percona-server-mongodb-operator-6455bb5fb4-8qqdb + set +o xtrace waiting for pod/percona-server-mongodb-operator-6455bb5fb4-8qqdb to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.USnaOnjJvR +++ mktemp ++ local LAST_ERR=/tmp/tmp.d9OmfwlH77 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.USnaOnjJvR ++ cat /tmp/tmp.d9OmfwlH77 ++ rm /tmp/tmp.USnaOnjJvR /tmp/tmp.d9OmfwlH77 ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-6455bb5fb4-8qqdb ++ mktemp + local LAST_OUT=/tmp/tmp.83iD380HtG ++ mktemp + local LAST_ERR=/tmp/tmp.6mta3VXTKZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-6455bb5fb4-8qqdb + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.83iD380HtG + cat /tmp/tmp.6mta3VXTKZ + rm /tmp/tmp.83iD380HtG /tmp/tmp.6mta3VXTKZ + return 0 2026-03-26T09:31:15.818Z INFO setup Manager starting up {"gitCommit": "ae4e3cbc053c422311418c8c6083b24139fc7e69", "gitBranch": "PR-2272-ae4e3cbc", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace mongod-major-upgrade-sharded-2121 + local namespace=mongod-major-upgrade-sharded-2121 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get 
MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces mongod-major-upgrade-sharded-2121' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces mongod-major-upgrade-sharded-2121 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace mongod-major-upgrade-sharded-2121 --ignore-not-found ++ mktemp + xargs kubectl delete ns ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.7ip6yGN0oF ++ mktemp + local LAST_OUT=/tmp/tmp.uZT90oj7GU + local LAST_ERR=/tmp/tmp.J15fuSzTKp + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.IXIKP6nLkl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace mongod-major-upgrade-sharded-2121 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7ip6yGN0oF + cat /tmp/tmp.J15fuSzTKp + rm /tmp/tmp.7ip6yGN0oF /tmp/tmp.J15fuSzTKp + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uZT90oj7GU + cat /tmp/tmp.IXIKP6nLkl + rm /tmp/tmp.uZT90oj7GU /tmp/tmp.IXIKP6nLkl + return 0 + kubectl_bin wait --for=delete namespace mongod-major-upgrade-sharded-2121 ++ mktemp + local LAST_OUT=/tmp/tmp.eCs09haUGF ++ mktemp + local LAST_ERR=/tmp/tmp.jbmHFZwRg4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace mongod-major-upgrade-sharded-2121 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eCs09haUGF + cat /tmp/tmp.jbmHFZwRg4 + rm /tmp/tmp.eCs09haUGF /tmp/tmp.jbmHFZwRg4 + return 0 + desc 'create namespace mongod-major-upgrade-sharded-2121' + set +o xtrace ----------------------------------------------------------------------------------- create namespace mongod-major-upgrade-sharded-2121 
----------------------------------------------------------------------------------- + kubectl_bin create namespace mongod-major-upgrade-sharded-2121 ++ mktemp + local LAST_OUT=/tmp/tmp.iCYM9q1T1O ++ mktemp + local LAST_ERR=/tmp/tmp.P22dgLbM8f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace mongod-major-upgrade-sharded-2121 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iCYM9q1T1O namespace/mongod-major-upgrade-sharded-2121 created + cat /tmp/tmp.P22dgLbM8f + rm /tmp/tmp.iCYM9q1T1O /tmp/tmp.P22dgLbM8f + return 0 + set_kube_ctx mongod-major-upgrade-sharded-2121 + local namespace=mongod-major-upgrade-sharded-2121 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.4TmrDgGbJw +++ mktemp ++ local LAST_ERR=/tmp/tmp.D5xdMp8Xfo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4TmrDgGbJw ++ cat /tmp/tmp.D5xdMp8Xfo ++ rm /tmp/tmp.4TmrDgGbJw /tmp/tmp.D5xdMp8Xfo ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster10 --namespace=mongod-major-upgrade-sharded-2121 ++ mktemp + local LAST_OUT=/tmp/tmp.g6vYDXXTc3 ++ mktemp + local LAST_ERR=/tmp/tmp.vClALOMq0X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster10 --namespace=mongod-major-upgrade-sharded-2121 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g6vYDXXTc3 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2272-ae4e3cbc-4-cluster10" modified. 
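[editor's sketch] Nearly every command in this trace runs through the kubectl_bin helper whose expansion repeats above: capture stdout/stderr into mktemp files, retry up to three times with a growing back-off (the failed `crd/null` lookup earlier shows the sleep 0 / 4 / 8 sequence), then print the captured output and return the last exit status. A minimal reconstruction from the trace — behavior inferred from the expansions, not copied from the test suite's source:

    # Retry wrapper around kubectl, reconstructed from the xtrace output.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break                      # success: stop retrying
            fi
            sleep $((timeout * i))         # back-off seen in the trace: 0, 4, 8
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }
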
+ cat /tmp/tmp.vClALOMq0X + rm /tmp/tmp.g6vYDXXTc3 /tmp/tmp.vClALOMq0X + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.5GkTTGio0G ++ mktemp + local LAST_ERR=/tmp/tmp.S1OgFY0JY8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5GkTTGio0G secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.S1OgFY0JY8 + rm /tmp/tmp.5GkTTGio0G /tmp/tmp.S1OgFY0JY8 + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.fZaB33FdBV ++ mktemp + local LAST_ERR=/tmp/tmp.Hw9CGVCZKW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fZaB33FdBV deployment.apps/psmdb-client created secret/some-users created + cat /tmp/tmp.Hw9CGVCZKW + rm /tmp/tmp.fZaB33FdBV /tmp/tmp.Hw9CGVCZKW + return 0 + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.dep.json /tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.dep.json + generate_vs_json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.json /tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.json + local template_path=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.json + local target_path=/tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.json ++ jq '.versions[0].operator="1.23.0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.json + local 'version_service_source={ "versions": [ { "operator": "1.23.0", "product": "psmdb-operator", "matrix": { "mongod": {}, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, "operator": {} } } ] }' + for image_mongod in ${IMAGE_MONGOD_CHAIN[@]} ++ get_mongod_ver_from_image 
docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 ++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 +++ run_simple_cli_inside_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 'mongod --version' +++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 +++ local 'cli=mongod --version' +++ local pod_name=32008 +++ kubectl_bin -n default run 32008 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 --restart=Never --command -- sleep infinity +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gn55evBgRF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vKwlcn66y5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 32008 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gn55evBgRF +++ cat /tmp/tmp.vKwlcn66y5 +++ rm /tmp/tmp.gn55evBgRF /tmp/tmp.vKwlcn66y5 +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/32008 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.22ov8RgsSW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3u7MWVHYur +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/32008 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.22ov8RgsSW +++ cat /tmp/tmp.3u7MWVHYur +++ rm /tmp/tmp.22ov8RgsSW /tmp/tmp.3u7MWVHYur +++ return 0 ++++ kubectl_bin -n default exec 32008 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.cYhZ2naWt1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ntvXsKBDA8 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 32008 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.cYhZ2naWt1 ++++ cat /tmp/tmp.ntvXsKBDA8 ++++ rm /tmp/tmp.cYhZ2naWt1 /tmp/tmp.ntvXsKBDA8 ++++ return 0 +++ local 'output=db version v6.0.27-21 Build Info: { "version": "6.0.27-21", "gitVersion": "7b47bbb85ce39c999dc6f02ec3ebe4c45644e5b4", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "TDE", "FIPSMode", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/32008 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n3jvd8kuMK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KOw1rIAgPn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/32008 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.n3jvd8kuMK +++ cat /tmp/tmp.KOw1rIAgPn Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
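[editor's sketch] The block above probes an image's mongod version by starting a throwaway pod that just sleeps, waiting for it to become Ready, exec-ing the CLI inside it, and force-deleting the pod. All of those kubectl calls appear verbatim in the trace; the numeric pod names (32008, 7476, ...) suggest $RANDOM, which is a guess. Condensed:

    # Run a one-off command inside an image and print its output.
    run_simple_cli_inside_image() {
        local image="$1" cli="$2"
        local pod_name=$RANDOM    # trace shows numeric names; source of randomness assumed
        kubectl -n default run "$pod_name" --image="$image" --restart=Never --command -- sleep infinity
        kubectl -n default wait --for=condition=Ready "pod/$pod_name"
        local output
        output=$(kubectl -n default exec "$pod_name" -- bash -c "$cli 2>&1")
        kubectl -n default delete "pod/$pod_name" --grace-period=0 --force
        echo "$output"
    }

    # Usage, as in the trace: extract "6.0.27-21" from `mongod --version`.
    run_simple_cli_inside_image "$image" 'mongod --version' \
        | sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
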
+++ rm /tmp/tmp.n3jvd8kuMK /tmp/tmp.KOw1rIAgPn +++ return 0 +++ echo db version v6.0.27-21 Build Info: '{' '"version":' '"6.0.27-21",' '"gitVersion":' '"7b47bbb85ce39c999dc6f02ec3ebe4c45644e5b4",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"TDE",' '"FIPSMode",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=6.0.27-21 ++ [[ ! 6.0.27-21 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 6.0.27-21 + current_mongod_version=6.0.27-21 ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.23.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' ++ jq '.versions[0].matrix.mongod += {"6.0.27-21": {"image_path":"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.23.0", "product": "psmdb-operator", "matrix": { "mongod": { "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, "operator": {} } } ] }' + for image_mongod in ${IMAGE_MONGOD_CHAIN[@]} ++ get_mongod_ver_from_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0 ++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ run_simple_cli_inside_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version' +++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ local 'cli=mongod --version' +++ local pod_name=7476 +++ kubectl_bin -n default run 7476 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PE38JpJRsF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yFyXaR2O1Q +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 7476 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PE38JpJRsF +++ cat /tmp/tmp.yFyXaR2O1Q +++ rm /tmp/tmp.PE38JpJRsF /tmp/tmp.yFyXaR2O1Q +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/7476 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZVVGRdI4gM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YnKOnASKuo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/7476 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZVVGRdI4gM +++ cat /tmp/tmp.YnKOnASKuo +++ rm /tmp/tmp.ZVVGRdI4gM /tmp/tmp.YnKOnASKuo 
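[editor's sketch] Each probed version string is validated against a strict major.minor.patch-build pattern before it is folded into the matrix with a jq merge; both steps are visible in the expansions above. Isolated, with the literal values taken from the log:

    # Accept only "major.minor.patch-build" version strings.
    version_info=6.0.27-21
    if [[ ! $version_info =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]]; then
        echo "could not parse mongod version: $version_info" >&2
        exit 1
    fi

    # Merge the new version into the mongod section of the matrix.
    version_service_source=$(echo "$version_service_source" | jq \
        '.versions[0].matrix.mongod += {"6.0.27-21": {"image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended"}}')
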
+++ return 0 ++++ kubectl_bin -n default exec 7476 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.szeokVkuI2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1e0fiTS5EN ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 7476 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.szeokVkuI2 ++++ cat /tmp/tmp.1e0fiTS5EN ++++ rm /tmp/tmp.szeokVkuI2 /tmp/tmp.1e0fiTS5EN ++++ return 0 +++ local 'output=db version v7.0.30-16 Build Info: { "version": "7.0.30-16", "gitVersion": "cdfac0aedc386df60083285302f18ecef4c5382e", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/7476 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YzZ67UteMM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0pMUkQCd8H +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/7476 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YzZ67UteMM +++ cat /tmp/tmp.0pMUkQCd8H Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.YzZ67UteMM /tmp/tmp.0pMUkQCd8H +++ return 0 +++ echo db version v7.0.30-16 Build Info: '{' '"version":' '"7.0.30-16",' '"gitVersion":' '"cdfac0aedc386df60083285302f18ecef4c5382e",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=7.0.30-16 ++ [[ ! 
7.0.30-16 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 7.0.30-16 + current_mongod_version=7.0.30-16 ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.23.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' ++ jq '.versions[0].matrix.mongod += {"7.0.30-16": {"image_path":"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.23.0", "product": "psmdb-operator", "matrix": { "mongod": { "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.30-16": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, "operator": {} } } ] }' + for image_mongod in ${IMAGE_MONGOD_CHAIN[@]} ++ get_mongod_ver_from_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 ++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ run_simple_cli_inside_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version' +++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ local 'cli=mongod --version' +++ local pod_name=23501 +++ kubectl_bin -n default run 23501 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.baz6ZYjc6A ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NrkMMW1KbU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 23501 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.baz6ZYjc6A +++ cat /tmp/tmp.NrkMMW1KbU +++ rm /tmp/tmp.baz6ZYjc6A /tmp/tmp.NrkMMW1KbU +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/23501 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VQSXYKjOMB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.g3ukC5A9my +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/23501 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.VQSXYKjOMB +++ cat /tmp/tmp.g3ukC5A9my +++ rm /tmp/tmp.VQSXYKjOMB /tmp/tmp.g3ukC5A9my +++ return 0 ++++ kubectl_bin -n default exec 23501 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.z9m22xauOf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UCIg8qfAin ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 23501 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 
'!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.z9m22xauOf ++++ cat /tmp/tmp.UCIg8qfAin ++++ rm /tmp/tmp.z9m22xauOf /tmp/tmp.UCIg8qfAin ++++ return 0 +++ local 'output=db version v8.0.19-7 Build Info: { "version": "8.0.19-7", "gitVersion": "902b12dbea631904a8175499a56c8bb95b708483", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc-google", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/23501 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.t0NioTARGQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9xHpJGrrOg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/23501 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.t0NioTARGQ +++ cat /tmp/tmp.9xHpJGrrOg Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.t0NioTARGQ /tmp/tmp.9xHpJGrrOg +++ return 0 +++ echo db version v8.0.19-7 Build Info: '{' '"version":' '"8.0.19-7",' '"gitVersion":' '"902b12dbea631904a8175499a56c8bb95b708483",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=8.0.19-7 ++ [[ ! 
8.0.19-7 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 8.0.19-7 + current_mongod_version=8.0.19-7 ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.23.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' ++ jq '.versions[0].matrix.mongod += {"8.0.19-7": {"image_path":"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.23.0", "product": "psmdb-operator", "matrix": { "mongod": { "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.30-16": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" }, "8.0.19-7": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, "operator": {} } } ] }' ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.23.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '},' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' +++ get_pbm_version docker.io/perconalab/percona-server-mongodb-operator:main-backup +++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-backup ++++ run_simple_cli_inside_image docker.io/perconalab/percona-server-mongodb-operator:main-backup 'pbm-agent version' ++++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-backup ++++ local 'cli=pbm-agent version' ++++ local pod_name=23296 ++++ kubectl_bin -n default run 23296 --image=docker.io/perconalab/percona-server-mongodb-operator:main-backup --restart=Never --command -- sleep infinity ++++ /usr/sbin/sed -r 's/^Version:\ (([0-9]+\.){2}[0-9]+)\ .*/\1/g' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.DAJYSQtSZR +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.S5vPuGvF83 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default run 23296 --image=docker.io/perconalab/percona-server-mongodb-operator:main-backup --restart=Never --command -- sleep infinity ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 
1 ']' ++++ break ++++ cat /tmp/tmp.DAJYSQtSZR ++++ cat /tmp/tmp.S5vPuGvF83 ++++ rm /tmp/tmp.DAJYSQtSZR /tmp/tmp.S5vPuGvF83 ++++ return 0 ++++ kubectl_bin -n default wait --for=condition=Ready pod/23296 +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.cGFCFPWOmh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ofvIhPsrRA ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default wait --for=condition=Ready pod/23296 ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.cGFCFPWOmh ++++ cat /tmp/tmp.ofvIhPsrRA ++++ rm /tmp/tmp.cGFCFPWOmh /tmp/tmp.ofvIhPsrRA ++++ return 0 +++++ kubectl_bin -n default exec 23296 -- bash -c 'pbm-agent version 2>&1' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.8PBmmoBHcQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.n3nvIhwvWm +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in $(seq 0 2) +++++ set +e +++++ kubectl -n default exec 23296 -- bash -c 'pbm-agent version 2>&1' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.8PBmmoBHcQ +++++ cat /tmp/tmp.n3nvIhwvWm +++++ rm /tmp/tmp.8PBmmoBHcQ /tmp/tmp.n3nvIhwvWm +++++ return 0 ++++ local 'output=Version: 2.13.0 Platform: linux/amd64 GitCommit: e9b57e1395fe2602bd9637d5e4ef19b1df2bb3b7 GitBranch: release-2.13.0 BuildTime: 2026-02-26_12:41_UTC GoVersion: go1.25.7' ++++ kubectl_bin -n default delete pod/23296 --grace-period=0 --force +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2E5g6PqmgN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.KUAcGGAF8k ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default delete pod/23296 --grace-period=0 --force ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.2E5g6PqmgN ++++ cat /tmp/tmp.KUAcGGAF8k Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. ++++ rm /tmp/tmp.2E5g6PqmgN /tmp/tmp.KUAcGGAF8k ++++ return 0 ++++ echo Version: 2.13.0 Platform: linux/amd64 GitCommit: e9b57e1395fe2602bd9637d5e4ef19b1df2bb3b7 GitBranch: release-2.13.0 BuildTime: 2026-02-26_12:41_UTC GoVersion: go1.25.7 +++ local version_info=2.13.0 +++ [[ ! 
2.13.0 =~ ^([0-9]+\.){2}[0-9]+$ ]] +++ echo 2.13.0 ++ jq '.versions[0].matrix.backup += {"2.13.0": {"image_path":"docker.io/perconalab/percona-server-mongodb-operator:main-backup","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.23.0", "product": "psmdb-operator", "matrix": { "mongod": { "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.30-16": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" }, "8.0.19-7": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": { "2.13.0": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-backup", "status": "recommended" } }, "operator": {} } } ] }' ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.23.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '},' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{' '"2.13.0":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-backup",' '"status":' '"recommended"' '}' '},' '"operator":' '{}' '}' '}' ']' '}' ++ jq '.versions[0].matrix.operator += {"1.23.0": {"image_path":"docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.23.0", "product": "psmdb-operator", "matrix": { "mongod": { "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.30-16": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" }, "8.0.19-7": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": { "2.13.0": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-backup", "status": "recommended" } }, "operator": { "1.23.0": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc", "status": "recommended" } } } } ] }' + jq . 
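[editor's sketch] Stepping back, the whole version_service_source construction above reduces to: seed the template with the operator version, then fold each probed image into the matching matrix section (mongod per image in the chain, then backup, then operator), and finally pretty-print the result. The loop shape is inferred from the repeated trace blocks; the log inlines literal versions where this sketch uses jq --arg:

    # Assemble the fake version-service payload section by section.
    version_service_source=$(jq '.versions[0].operator="1.23.0"' "$template_path")

    for image_mongod in "${IMAGE_MONGOD_CHAIN[@]}"; do
        ver=$(get_mongod_ver_from_image "$image_mongod")
        version_service_source=$(echo "$version_service_source" | jq \
            --arg v "$ver" --arg img "$image_mongod" \
            '.versions[0].matrix.mongod += {($v): {"image_path": $img, "status": "recommended"}}')
    done

    # backup and operator sections are filled the same way, then written out:
    echo "$version_service_source" | jq . > "$target_path"
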
+ echo '{' '"versions":' '[' '{' '"operator":' '"1.23.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '},' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{' '"2.13.0":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-backup",' '"status":' '"recommended"' '}' '},' '"operator":' '{' '"1.23.0":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc",' '"status":' '"recommended"' '}' '}' '}' '}' ']' '}' + kubectl_bin create configmap -n psmdb-operator versions --from-file /tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.dep.json --from-file /tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.M34jkLG1YM ++ mktemp + local LAST_ERR=/tmp/tmp.y9aHR89HwI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create configmap -n psmdb-operator versions --from-file /tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.dep.json --from-file /tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M34jkLG1YM configmap/versions created + cat /tmp/tmp.y9aHR89HwI + rm /tmp/tmp.M34jkLG1YM /tmp/tmp.y9aHR89HwI + return 0 + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/vs.yml + /usr/sbin/sed -r s#operator.9.9.9.psmdb-operator#operator.1.23.0.psmdb-operator#g + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.8oyyZ909IS ++ mktemp + local LAST_ERR=/tmp/tmp.KnVaJ5ho14 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8oyyZ909IS deployment.apps/version-service created service/version-service created + cat /tmp/tmp.KnVaJ5ho14 + rm /tmp/tmp.8oyyZ909IS /tmp/tmp.KnVaJ5ho14 + return 0 ++ jq '.[] | .[] |.matrix.mongod' /tmp/tmp.Tm1qJlhCiF/operator.1.23.0.psmdb-operator.json + version_matrix='{ "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.30-16": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" }, "8.0.19-7": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0", "status": "recommended" } }' ++ get_mongod_ver_from_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 ++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ run_simple_cli_inside_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version' +++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ local 'cli=mongod --version' +++ local 
pod_name=23360 +++ kubectl_bin -n default run 23360 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3QYebtCqYJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.36ncgenxLy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 23360 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3QYebtCqYJ +++ cat /tmp/tmp.36ncgenxLy +++ rm /tmp/tmp.3QYebtCqYJ /tmp/tmp.36ncgenxLy +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/23360 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iAZIzolrWx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nfBTMDEr7w +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/23360 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.iAZIzolrWx +++ cat /tmp/tmp.nfBTMDEr7w +++ rm /tmp/tmp.iAZIzolrWx /tmp/tmp.nfBTMDEr7w +++ return 0 ++++ kubectl_bin -n default exec 23360 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.JMFfYgjn53 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.q98B4EB0Uc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 23360 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.JMFfYgjn53 ++++ cat /tmp/tmp.q98B4EB0Uc ++++ rm /tmp/tmp.JMFfYgjn53 /tmp/tmp.q98B4EB0Uc ++++ return 0 +++ local 'output=db version v8.0.19-7 Build Info: { "version": "8.0.19-7", "gitVersion": "902b12dbea631904a8175499a56c8bb95b708483", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc-google", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/23360 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6ckzJxqs5b ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ETz4MeZ3Ke +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/23360 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6ckzJxqs5b +++ cat /tmp/tmp.ETz4MeZ3Ke Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
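Note: the version probe above follows a simple pattern — start a throwaway pod from the target mongod image, exec "mongod --version", and strip the version out of the banner. A minimal standalone sketch, assuming cluster access and GNU sed (the pod name is illustrative; kubectl_bin in this suite is just kubectl wrapped with retries):

    # Sketch: extract an "8.0.19-7"-style version from a mongod image.
    image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0
    pod=version-probe-$RANDOM
    kubectl -n default run "$pod" --image="$image" --restart=Never --command -- sleep infinity
    kubectl -n default wait --for=condition=Ready "pod/$pod"
    kubectl -n default exec "$pod" -- mongod --version \
        | sed -nr 's/^db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/p'   # -> 8.0.19-7
    kubectl -n default delete "pod/$pod" --grace-period=0 --force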
+++ rm /tmp/tmp.6ckzJxqs5b /tmp/tmp.ETz4MeZ3Ke +++ return 0 +++ echo db version v8.0.19-7 Build Info: '{' '"version":' '"8.0.19-7",' '"gitVersion":' '"902b12dbea631904a8175499a56c8bb95b708483",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=8.0.19-7 ++ [[ ! 8.0.19-7 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 8.0.19-7 + current_mongod_version=8.0.19-7 ++ echo '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '},' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '}' ++ jq '. += {"8.0.19-7":{"image_path":"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0","status": "recommended"}}' + version_matrix='{ "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.30-16": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" }, "8.0.19-7": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0", "status": "recommended" } }' ++ echo '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '},' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '}' ++ jq 'to_entries | sort_by( .key | split("[[:punct:]]";"g") | map(tonumber) ) | map({(.key): .value}) ' + version_matrix='[ { "6.0.27-21": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" } }, { "7.0.30-16": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } }, { "8.0.19-7": { "image_path": "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0", "status": "recommended" } } ]' ++ echo '[' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '{' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '},' '{' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '}' ']' ++ jq -r '.[] | keys | .[] | split(".") | .[:2] | join(".")' ++ uniq ++ tail -n +2 + versions_to_verify='7.0 8.0' + cluster=some-name ++ echo '[' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '{' '"7.0.30-16":' '{' '"image_path":' 
'"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '},' '{' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '}' ']' ++ jq '.[0] | keys | .[0]' + desc 'Starting the cluster with IMAGE_MONGOD "6.0.27-21"' + set +o xtrace ----------------------------------------------------------------------------------- Starting the cluster with IMAGE_MONGOD "6.0.27-21" ----------------------------------------------------------------------------------- ++ echo '[' '{' '"6.0.27-21":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '{' '"7.0.30-16":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '},' '{' '"8.0.19-7":' '{' '"image_path":' '"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0",' '"status":' '"recommended"' '}' '}' ']' ++ jq -r 'to_entries | .[0].value | .[].image_path' + export IMAGE_MONGOD=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 + IMAGE_MONGOD=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 + qualify_image_var IMAGE_MONGOD + local var=IMAGE_MONGOD + local val=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 + local out= + local ref + [[ -z docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 ]] + [[ docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 == *\ * ]] ++ qualify_image_ref docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 ++ local ref=docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 ++ local first=docker.io ++ [[ docker.io == *.* ]] ++ printf %s docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 ++ return 0 + printf -v IMAGE_MONGOD %s docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/some-name.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/some-name.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod6.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2272-ae4e3cbc"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/mongod-major-upgrade-sharded-2121/g + local LAST_OUT=/tmp/tmp.JzudEftilz ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.lDXwZ2AwsG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JzudEftilz perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.lDXwZ2AwsG + rm /tmp/tmp.JzudEftilz /tmp/tmp.lDXwZ2AwsG + return 0 + desc 'check if all 3 Pods started' + set +o xtrace 
----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lo5ldiE7ds +++ mktemp ++ local LAST_ERR=/tmp/tmp.3XE4jfrkFM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lo5ldiE7ds ++ cat /tmp/tmp.3XE4jfrkFM ++ rm /tmp/tmp.lo5ldiE7ds /tmp/tmp.3XE4jfrkFM ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qEaSGPpA9h +++ mktemp ++ local LAST_ERR=/tmp/tmp.KTkjhzpsR7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qEaSGPpA9h ++ cat /tmp/tmp.KTkjhzpsR7 ++ rm /tmp/tmp.qEaSGPpA9h /tmp/tmp.KTkjhzpsR7 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wwogaMOy0a +++ mktemp ++ local LAST_ERR=/tmp/tmp.cmqDIGmwCK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wwogaMOy0a ++ cat /tmp/tmp.cmqDIGmwCK ++ rm /tmp/tmp.wwogaMOy0a /tmp/tmp.cmqDIGmwCK ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................... 
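Note: wait_cluster_consistency is essentially a poll of the custom resource's status.state field. A rough standalone equivalent, assuming the psmdb short name resolves through the installed CRD (the retry count and sleep mirror the wait_time=32 / sleep 7 values visible in the trace):

    # Sketch: block until the PSMDB cluster reports "ready".
    cluster=some-name
    for i in $(seq 1 32); do
        state=$(kubectl get psmdb "$cluster" -o 'jsonpath={.status.state}')
        [ "$state" = "ready" ] && break
        sleep 7
    done
    [ "$state" = "ready" ] || { echo "cluster never became ready" >&2; exit 1; }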
+ wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1W2d5lpWYe +++ mktemp ++ local LAST_ERR=/tmp/tmp.TiScSWZGA4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1W2d5lpWYe ++ cat /tmp/tmp.TiScSWZGA4 ++ rm /tmp/tmp.1W2d5lpWYe /tmp/tmp.TiScSWZGA4 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jvTum1L325 +++ mktemp ++ local LAST_ERR=/tmp/tmp.P3piMOJCuB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jvTum1L325 ++ cat /tmp/tmp.P3piMOJCuB ++ rm /tmp/tmp.jvTum1L325 /tmp/tmp.P3piMOJCuB ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JtGi5w7xrU +++ mktemp ++ local LAST_ERR=/tmp/tmp.dkkq67FDwX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JtGi5w7xrU ++ cat /tmp/tmp.dkkq67FDwX ++ rm /tmp/tmp.JtGi5w7xrU /tmp/tmp.dkkq67FDwX ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sMfewc83Ph +++ mktemp ++ local LAST_ERR=/tmp/tmp.1E3ymcR3Bw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sMfewc83Ph ++ cat /tmp/tmp.1E3ymcR3Bw ++ rm /tmp/tmp.sMfewc83Ph /tmp/tmp.1E3ymcR3Bw ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WFc91Ktyel +++ mktemp ++ local LAST_ERR=/tmp/tmp.WEFSMUKDv4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WFc91Ktyel ++ cat /tmp/tmp.WEFSMUKDv4 ++ rm /tmp/tmp.WFc91Ktyel /tmp/tmp.WEFSMUKDv4 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DgJAuyEL12 +++ mktemp ++ local LAST_ERR=/tmp/tmp.e6wIoDspfU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DgJAuyEL12 ++ cat /tmp/tmp.e6wIoDspfU ++ rm /tmp/tmp.DgJAuyEL12 /tmp/tmp.e6wIoDspfU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PslrEchADi +++ mktemp ++ local LAST_ERR=/tmp/tmp.WpX6xPs4zc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PslrEchADi ++ cat /tmp/tmp.WpX6xPs4zc ++ rm /tmp/tmp.PslrEchADi /tmp/tmp.WpX6xPs4zc ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-2121 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-2121 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7EvUqyA5SY +++ mktemp ++ local LAST_ERR=/tmp/tmp.OSEIFRSN91 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ 
cat /tmp/tmp.7EvUqyA5SY ++ cat /tmp/tmp.OSEIFRSN91 ++ rm /tmp/tmp.7EvUqyA5SY /tmp/tmp.OSEIFRSN91 ++ return 0 + local client_container=psmdb-client-bb8b97679-f8lw8 + kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.aeyEirMk9e ++ mktemp + local LAST_ERR=/tmp/tmp.BzD6O0xftc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aeyEirMk9e Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("1a2da1f7-0f28-43ff-b1d0-2c0c32023bfb") } Percona Server for MongoDB server version: v6.0.27-21 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.BzD6O0xftc + rm /tmp/tmp.aeyEirMk9e /tmp/tmp.BzD6O0xftc + return 0 + sleep 2 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.D1BlJRMiyk +++ mktemp ++ local LAST_ERR=/tmp/tmp.3gsgKvawlJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.D1BlJRMiyk ++ cat /tmp/tmp.3gsgKvawlJ ++ rm /tmp/tmp.D1BlJRMiyk /tmp/tmp.3gsgKvawlJ ++ return 0 + local client_container=psmdb-client-bb8b97679-f8lw8 + kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.qjU7bzxUpG ++ mktemp + local LAST_ERR=/tmp/tmp.f8k7OBHzAe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qjU7bzxUpG Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("3eaf770f-dfd0-4c7f-bfb5-7e80e18436f0") } Percona Server for MongoDB server version: v6.0.27-21 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.f8k7OBHzAe + rm /tmp/tmp.qjU7bzxUpG /tmp/tmp.f8k7OBHzAe + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local command=find + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-03-26T09:35:36+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.akjaEvmtYB +++ mktemp ++ local LAST_ERR=/tmp/tmp.4ypyoynNp7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.akjaEvmtYB ++ cat /tmp/tmp.4ypyoynNp7 ++ rm /tmp/tmp.akjaEvmtYB /tmp/tmp.4ypyoynNp7 ++ return 0 + local client_container=psmdb-client-bb8b97679-f8lw8 + kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.ar0dbB5gCi ++ mktemp + local LAST_ERR=/tmp/tmp.J3nfdNKLIe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ar0dbB5gCi + cat /tmp/tmp.J3nfdNKLIe + rm /tmp/tmp.ar0dbB5gCi /tmp/tmp.J3nfdNKLIe + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/compare/find.json /tmp/tmp.Tm1qJlhCiF/find + desc 'Starting to follow mongod upgrade images chain' + set +o xtrace ----------------------------------------------------------------------------------- Starting to follow mongod upgrade images chain 
----------------------------------------------------------------------------------- + target_generation=2 + for version in ${versions_to_verify[@]} + desc 'Testing upgrade to version: 7.0' + set +o xtrace ----------------------------------------------------------------------------------- Testing upgrade to version: 7.0 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/apply","value": "7.0-recommended"}, ]' ++ mktemp + local LAST_OUT=/tmp/tmp.GWvIcR7gQd ++ mktemp + local LAST_ERR=/tmp/tmp.uhJfarSxWI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/apply","value": "7.0-recommended"}, ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GWvIcR7gQd perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.uhJfarSxWI + rm /tmp/tmp.GWvIcR7gQd /tmp/tmp.uhJfarSxWI + return 0 + sleep 70 + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.atfmRdarss +++ mktemp ++ local LAST_ERR=/tmp/tmp.vlwIh3KSaK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.atfmRdarss ++ cat /tmp/tmp.vlwIh3KSaK ++ rm /tmp/tmp.atfmRdarss /tmp/tmp.vlwIh3KSaK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sMnzzPOi2o +++ mktemp ++ local LAST_ERR=/tmp/tmp.PVHqZiHsM2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sMnzzPOi2o ++ cat /tmp/tmp.PVHqZiHsM2 ++ rm /tmp/tmp.sMnzzPOi2o /tmp/tmp.PVHqZiHsM2 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RjM29mHEgd +++ mktemp ++ local LAST_ERR=/tmp/tmp.SoBEoNgzBm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RjM29mHEgd ++ cat /tmp/tmp.SoBEoNgzBm ++ rm /tmp/tmp.RjM29mHEgd /tmp/tmp.SoBEoNgzBm ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 
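Note: the 6.0 -> 7.0 step is driven entirely by one spec change — pointing spec.upgradeOptions.apply at the next major's recommended entry in the mock version service deployed earlier — after which the operator rolls cfg, rs0 and mongos in turn. Isolated from the harness, the trigger is just:

    # Sketch: request the recommended 7.0 image from the version service.
    kubectl patch psmdb/some-name --type=json \
        -p='[{"op":"replace","path":"/spec/upgradeOptions/apply","value":"7.0-recommended"}]'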
+ [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness....................................................................................................................................................... + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HbpoikLyLR +++ mktemp ++ local LAST_ERR=/tmp/tmp.ANwENQ7cU5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HbpoikLyLR ++ cat /tmp/tmp.ANwENQ7cU5 ++ rm /tmp/tmp.HbpoikLyLR /tmp/tmp.ANwENQ7cU5 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YHlagb92cK +++ mktemp ++ local LAST_ERR=/tmp/tmp.aIQePxM7DZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YHlagb92cK ++ cat /tmp/tmp.aIQePxM7DZ ++ rm /tmp/tmp.YHlagb92cK /tmp/tmp.aIQePxM7DZ ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zo2ktnevVb +++ mktemp ++ local LAST_ERR=/tmp/tmp.9W2MkKjILX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zo2ktnevVb ++ cat /tmp/tmp.9W2MkKjILX ++ rm /tmp/tmp.zo2ktnevVb /tmp/tmp.9W2MkKjILX ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DtstmYaQuI +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.SzujIOKtKq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DtstmYaQuI ++ cat /tmp/tmp.SzujIOKtKq ++ rm /tmp/tmp.DtstmYaQuI /tmp/tmp.SzujIOKtKq ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tvLPCCa8de +++ mktemp ++ local LAST_ERR=/tmp/tmp.FJrYNbx9DU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tvLPCCa8de ++ cat /tmp/tmp.FJrYNbx9DU ++ rm /tmp/tmp.tvLPCCa8de /tmp/tmp.FJrYNbx9DU ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0HlKpZ508T +++ mktemp ++ local LAST_ERR=/tmp/tmp.0BSfemtxoj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0HlKpZ508T ++ cat /tmp/tmp.0BSfemtxoj ++ rm /tmp/tmp.0HlKpZ508T /tmp/tmp.0BSfemtxoj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xTdzYMAQZp +++ mktemp ++ local LAST_ERR=/tmp/tmp.az0e9cEluf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xTdzYMAQZp ++ cat /tmp/tmp.az0e9cEluf ++ rm /tmp/tmp.xTdzYMAQZp /tmp/tmp.az0e9cEluf ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 2 statefulset some-name-cfg + local generation=2 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gEtXFvE0AX +++ mktemp ++ local LAST_ERR=/tmp/tmp.UWodUXMq9p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gEtXFvE0AX ++ cat /tmp/tmp.UWodUXMq9p ++ rm /tmp/tmp.gEtXFvE0AX /tmp/tmp.UWodUXMq9p ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset some-name-rs0 + local generation=2 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.87Be3Jh92A +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.8dGQ0NFQpp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.87Be3Jh92A ++ cat /tmp/tmp.8dGQ0NFQpp ++ rm /tmp/tmp.87Be3Jh92A /tmp/tmp.8dGQ0NFQpp ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset some-name-mongos + local generation=2 + local resource_type=statefulset + local resource_name=some-name-mongos + local current_generation ++ kubectl_bin get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cAr409GuWW +++ mktemp ++ local LAST_ERR=/tmp/tmp.wMLoLpzulF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cAr409GuWW ++ cat /tmp/tmp.wMLoLpzulF ++ rm /tmp/tmp.cAr409GuWW /tmp/tmp.wMLoLpzulF ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.GDGcz5sWtm ++ mktemp + local LAST_ERR=/tmp/tmp.l9cYaD8Ryl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GDGcz5sWtm perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.l9cYaD8Ryl + rm /tmp/tmp.GDGcz5sWtm /tmp/tmp.l9cYaD8Ryl + return 0 + sleep 10 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fzfC00TjTr +++ mktemp ++ local LAST_ERR=/tmp/tmp.VCOvnOrIql ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fzfC00TjTr ++ cat /tmp/tmp.VCOvnOrIql ++ rm /tmp/tmp.fzfC00TjTr /tmp/tmp.VCOvnOrIql ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK ++ run_mongos 'JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121 ++ local 'command=JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ grep -E '^\{.*\}$' ++ jq -r .featureCompatibilityVersion.version +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Q1qYdGyabO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PPb0mvF47S +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Q1qYdGyabO +++ cat /tmp/tmp.PPb0mvF47S +++ rm /tmp/tmp.Q1qYdGyabO /tmp/tmp.PPb0mvF47S +++ return 0 ++ local client_container=psmdb-client-bb8b97679-f8lw8 ++ kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y3kqdjLlSN +++ mktemp ++ local LAST_ERR=/tmp/tmp.p1p5pyZcEx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y3kqdjLlSN ++ cat /tmp/tmp.p1p5pyZcEx ++ rm /tmp/tmp.Y3kqdjLlSN /tmp/tmp.p1p5pyZcEx ++ return 0 + currentFCV=7.0 + [[ 7.0 != 7.0 ]] + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.utuXm70pwJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZJkeQTV2KH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.utuXm70pwJ ++ cat /tmp/tmp.ZJkeQTV2KH ++ rm /tmp/tmp.utuXm70pwJ /tmp/tmp.ZJkeQTV2KH ++ return 0 + local client_container=psmdb-client-bb8b97679-f8lw8 + kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.9lz6rrdcfC ++ mktemp + local LAST_ERR=/tmp/tmp.dCBSnIWovA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9lz6rrdcfC Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("c4c873f0-5180-4a4c-8085-26bbe7218367") } Percona Server for MongoDB server version: v7.0.30-16 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.dCBSnIWovA + rm /tmp/tmp.9lz6rrdcfC 
/tmp/tmp.dCBSnIWovA + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 -2 + local command=find + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local postfix=-2 + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-03-26T09:43:21+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xmunm7NTgL +++ mktemp ++ local LAST_ERR=/tmp/tmp.tb6jTQhaIv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xmunm7NTgL ++ cat /tmp/tmp.tb6jTQhaIv ++ rm /tmp/tmp.xmunm7NTgL /tmp/tmp.tb6jTQhaIv ++ return 0 + local client_container=psmdb-client-bb8b97679-f8lw8 + kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.IL1vAJ96ga ++ mktemp + local LAST_ERR=/tmp/tmp.Agsfgx0UrY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IL1vAJ96ga + cat /tmp/tmp.Agsfgx0UrY + rm /tmp/tmp.IL1vAJ96ga /tmp/tmp.Agsfgx0UrY + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/compare/find-2.json /tmp/tmp.Tm1qJlhCiF/find-2 + target_generation=3 + for version in ${versions_to_verify[@]} + desc 'Testing upgrade to version: 8.0' + set +o xtrace ----------------------------------------------------------------------------------- Testing upgrade to version: 8.0 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/apply","value": "8.0-recommended"}, ]' ++ mktemp + local LAST_OUT=/tmp/tmp.wGdIfyJCKk ++ mktemp + local LAST_ERR=/tmp/tmp.18uWFgJWjO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb/some-name --type=json '-p=[ 
{"op":"replace","path":"/spec/upgradeOptions/apply","value": "8.0-recommended"}, ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wGdIfyJCKk perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.18uWFgJWjO + rm /tmp/tmp.wGdIfyJCKk /tmp/tmp.18uWFgJWjO + return 0 + sleep 70 + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YdyRJU7CSW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ub8N5Mxp30 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YdyRJU7CSW ++ cat /tmp/tmp.Ub8N5Mxp30 ++ rm /tmp/tmp.YdyRJU7CSW /tmp/tmp.Ub8N5Mxp30 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fJK80aXjJE +++ mktemp ++ local LAST_ERR=/tmp/tmp.5BmHW5Cmgc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fJK80aXjJE ++ cat /tmp/tmp.5BmHW5Cmgc ++ rm /tmp/tmp.fJK80aXjJE /tmp/tmp.5BmHW5Cmgc ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pr2I8y1OS9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.X7eSoV1D9D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pr2I8y1OS9 ++ cat /tmp/tmp.X7eSoV1D9D ++ rm /tmp/tmp.pr2I8y1OS9 /tmp/tmp.X7eSoV1D9D ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................................................................................................ 
+ wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NEQPtLUQhx +++ mktemp ++ local LAST_ERR=/tmp/tmp.YyLPiQAK2F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NEQPtLUQhx ++ cat /tmp/tmp.YyLPiQAK2F ++ rm /tmp/tmp.NEQPtLUQhx /tmp/tmp.YyLPiQAK2F ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BT918lP0hK +++ mktemp ++ local LAST_ERR=/tmp/tmp.FeV48NYHGV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BT918lP0hK ++ cat /tmp/tmp.FeV48NYHGV ++ rm /tmp/tmp.BT918lP0hK /tmp/tmp.FeV48NYHGV ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CzhXiDS6BU +++ mktemp ++ local LAST_ERR=/tmp/tmp.fRKOkb85j3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CzhXiDS6BU ++ cat /tmp/tmp.fRKOkb85j3 ++ rm /tmp/tmp.CzhXiDS6BU /tmp/tmp.fRKOkb85j3 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hRMcvsynpJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.RtEOk4IlFn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hRMcvsynpJ ++ cat /tmp/tmp.RtEOk4IlFn ++ rm /tmp/tmp.hRMcvsynpJ /tmp/tmp.RtEOk4IlFn ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1Ex693dtgN +++ mktemp ++ local LAST_ERR=/tmp/tmp.s3NsjDdBFW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1Ex693dtgN ++ cat /tmp/tmp.s3NsjDdBFW ++ rm /tmp/tmp.1Ex693dtgN /tmp/tmp.s3NsjDdBFW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Mr34idXs6U +++ mktemp ++ local LAST_ERR=/tmp/tmp.MRnBLaRwhL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Mr34idXs6U ++ cat /tmp/tmp.MRnBLaRwhL ++ rm /tmp/tmp.Mr34idXs6U /tmp/tmp.MRnBLaRwhL ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qh1a8M14fp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qdj7nTlxrK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qh1a8M14fp ++ cat /tmp/tmp.Qdj7nTlxrK ++ rm /tmp/tmp.qh1a8M14fp /tmp/tmp.Qdj7nTlxrK ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_generation 3 statefulset some-name-cfg + local generation=3 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PqtCLU3Mta +++ mktemp ++ local LAST_ERR=/tmp/tmp.8JEVQV4kWA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PqtCLU3Mta ++ cat /tmp/tmp.8JEVQV4kWA ++ rm /tmp/tmp.PqtCLU3Mta /tmp/tmp.8JEVQV4kWA ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + compare_generation 3 statefulset some-name-rs0 + local generation=3 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AGCxXMiCyx +++ mktemp ++ local LAST_ERR=/tmp/tmp.gqqgoEvEkd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 
0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AGCxXMiCyx ++ cat /tmp/tmp.gqqgoEvEkd ++ rm /tmp/tmp.AGCxXMiCyx /tmp/tmp.gqqgoEvEkd ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + compare_generation 3 statefulset some-name-mongos + local generation=3 + local resource_type=statefulset + local resource_name=some-name-mongos + local current_generation ++ kubectl_bin get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QE3y45cBfn +++ mktemp ++ local LAST_ERR=/tmp/tmp.eKZFI3RqEp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QE3y45cBfn ++ cat /tmp/tmp.eKZFI3RqEp ++ rm /tmp/tmp.QE3y45cBfn /tmp/tmp.eKZFI3RqEp ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.CNfUebdL6j ++ mktemp + local LAST_ERR=/tmp/tmp.M7wkXN73sV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CNfUebdL6j perconaservermongodb.psmdb.percona.com/some-name patched (no change) + cat /tmp/tmp.M7wkXN73sV + rm /tmp/tmp.CNfUebdL6j /tmp/tmp.M7wkXN73sV + return 0 + sleep 10 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.voeWa5qAtP +++ mktemp ++ local LAST_ERR=/tmp/tmp.p2JHjzLyRA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.voeWa5qAtP ++ cat /tmp/tmp.p2JHjzLyRA ++ rm /tmp/tmp.voeWa5qAtP /tmp/tmp.p2JHjzLyRA ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK ++ run_mongos 'JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121 ++ local 'command=JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local port=27017 ++ local mongo_bin=mongo ++ grep -E '^\{.*\}$' ++ jq -r .featureCompatibilityVersion.version +++ echo .svc.cluster.local +++ awk -F: '{print $2}' ++ suffix_port= ++ [[ -z '' ]] ++ suffix=.svc.cluster.local:27017 +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YcWRpHN6xw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OaiSfi5S0z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YcWRpHN6xw +++ cat /tmp/tmp.OaiSfi5S0z +++ rm /tmp/tmp.YcWRpHN6xw /tmp/tmp.OaiSfi5S0z +++ return 0 ++ local 
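[editor's note] The patch above flips spec.upgradeOptions.setFCV so the operator raises the featureCompatibilityVersion after the major upgrade; the test then reads FCV back from the config server replica set and compares it against 8.0. A minimal sketch of the same two steps run by hand, assuming the legacy mongo shell and the credentials used in this run:

# Ask the operator to set FCV after upgrade (same patch as in the trace)
kubectl patch psmdb/some-name --type=json \
	-p='[{"op":"replace","path":"/spec/upgradeOptions/setFCV","value":true}]'
# Read the resulting featureCompatibilityVersion from the admin database
mongo "mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin" \
	--eval 'JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))'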
client_container=psmdb-client-bb8b97679-f8lw8 ++ kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T0CisWVVt7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TxzDMCr7IR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T0CisWVVt7 ++ cat /tmp/tmp.TxzDMCr7IR ++ rm /tmp/tmp.T0CisWVVt7 /tmp/tmp.TxzDMCr7IR ++ return 0 + currentFCV=8.0 + [[ 8.0 != 8.0 ]] + run_mongos 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FojdI4bZUb +++ mktemp ++ local LAST_ERR=/tmp/tmp.yQNUmFoRW0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FojdI4bZUb ++ cat /tmp/tmp.yQNUmFoRW0 ++ rm /tmp/tmp.FojdI4bZUb /tmp/tmp.yQNUmFoRW0 ++ return 0 + local client_container=psmdb-client-bb8b97679-f8lw8 + kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.KCZbmUdsuO ++ mktemp + local LAST_ERR=/tmp/tmp.ECM5b8JCUm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KCZbmUdsuO Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("c6b30577-7b4f-4c97-905d-61f57d676955") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ECM5b8JCUm + rm /tmp/tmp.KCZbmUdsuO /tmp/tmp.ECM5b8JCUm + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 -3 + local command=find + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local 
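[editor's note] Writes go through a psmdb-client pod rather than a direct connection from the test runner. A minimal sketch of that run_mongos pattern, lifted from the trace above (the pod is resolved via the name=psmdb-client selector; printf expands the \n before the script is piped into mongo):

# Resolve the client pod, then pipe a shell script into mongo inside it
client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client" -- bash -c \
	'printf "use myApp\n db.test.insert({ x: 100503 })\n" | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin'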
postfix=-3 + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2026-03-26T09:50:12+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + grep -E -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RUC6gu338g +++ mktemp ++ local LAST_ERR=/tmp/tmp.la4YytJ54F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RUC6gu338g ++ cat /tmp/tmp.la4YytJ54F ++ rm /tmp/tmp.RUC6gu338g /tmp/tmp.la4YytJ54F ++ return 0 + local client_container=psmdb-client-bb8b97679-f8lw8 + kubectl_bin exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.QxxUZJXYtu ++ mktemp + local LAST_ERR=/tmp/tmp.LZBs3vQALo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-f8lw8 -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QxxUZJXYtu + cat /tmp/tmp.LZBs3vQALo + rm /tmp/tmp.QxxUZJXYtu /tmp/tmp.LZBs3vQALo + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/compare/find-3.json /tmp/tmp.Tm1qJlhCiF/find-3 + target_generation=4 + destroy mongod-major-upgrade-sharded-2121 + local namespace=mongod-major-upgrade-sharded-2121 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.4jyJRZN9vz +++ mktemp ++ local LAST_ERR=/tmp/tmp.HEsDxErXy0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for 
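[editor's note] compare_mongos_cmd normalizes the query output before diffing it against a golden file: driver/shell banner lines are filtered out and non-deterministic ObjectIds and namespace suffixes are masked. A minimal sketch of that normalize-then-diff step, reusing the same client pod and the filters from the trace:

# Query via mongos, strip noisy lines, mask nondeterminism, diff vs golden file
client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client" -- bash -c \
	'printf "use myApp\n db.test.find()\n" | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-2121.svc.cluster.local:27017/admin' \
	| grep -E -v 'I NETWORK|W NETWORK|Percona Server for MongoDB|connecting to:|Implicit session:|versions do not match' \
	| sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
	> /tmp/find-3
diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/e2e-tests/mongod-major-upgrade-sharded/compare/find-3.json /tmp/find-3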
i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4jyJRZN9vz ++ cat /tmp/tmp.HEsDxErXy0 No resources found in mongod-major-upgrade-sharded-2121 namespace. ++ rm /tmp/tmp.4jyJRZN9vz /tmp/tmp.HEsDxErXy0 ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.E2lyfnK4cQ ++ mktemp + local LAST_ERR=/tmp/tmp.roaFrTf4nU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E2lyfnK4cQ customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.roaFrTf4nU + rm /tmp/tmp.E2lyfnK4cQ /tmp/tmp.roaFrTf4nU + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.O2my1wULEE ++ mktemp + local LAST_ERR=/tmp/tmp.TP6IOa9fBl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.O2my1wULEE + cat /tmp/tmp.TP6IOa9fBl + rm /tmp/tmp.O2my1wULEE /tmp/tmp.TP6IOa9fBl + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UPwQJRK09c ++ mktemp + 
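[editor's note] delete_crd first strips finalizers from any leftover custom resources so CRD deletion cannot hang on them, then waits for each CRD to disappear. A minimal sketch of that pattern for one CRD, following the same kubectl calls as the trace (the trailing '|| :' tolerates the case where the resource type is already gone, as the "doesn't have a resource type" errors above show):

# Strip finalizers from remaining CRs, then wait for the CRD to vanish.
crd=perconaservermongodbbackups.psmdb.percona.com
kubectl get "$crd" --all-namespaces -o wide | grep -v NAMESPACE \
	| xargs -L 1 sh -xc 'kubectl patch '"$crd"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' || :
kubectl wait --for=delete crd "$crd"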
local LAST_ERR=/tmp/tmp.4tJmIieBMq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UPwQJRK09c + cat /tmp/tmp.4tJmIieBMq + rm /tmp/tmp.UPwQJRK09c /tmp/tmp.4tJmIieBMq + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.scovXkndXh ++ mktemp + local LAST_ERR=/tmp/tmp.hxLTvB3e2W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.scovXkndXh + cat /tmp/tmp.hxLTvB3e2W + rm /tmp/tmp.scovXkndXh /tmp/tmp.hxLTvB3e2W + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.lCNCauUz6k ++ mktemp + local LAST_ERR=/tmp/tmp.wwkdcNQKGT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2272/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lCNCauUz6k clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.wwkdcNQKGT + rm /tmp/tmp.lCNCauUz6k /tmp/tmp.wwkdcNQKGT + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.fI8VThFIPB ++ mktemp + local LAST_ERR=/tmp/tmp.3Psyzp36ju + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.fI8VThFIPB + cat /tmp/tmp.3Psyzp36ju Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
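[editor's note] Every cert-manager teardown attempt below fails with NotFound because cert-manager was never installed in this run; unlike the CRD and RBAC deletions above, this manifest delete is issued without --ignore-not-found, so all three retries print the full error list. A minimal sketch of the idempotent form, assuming the same manifest URL:

# Idempotent teardown: --ignore-not-found suppresses the NotFound errors
# that the retries below print when cert-manager is absent.
kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml \
	--ignore-not-found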
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.fI8VThFIPB + cat /tmp/tmp.3Psyzp36ju Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): 
error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not 
found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.fI8VThFIPB + cat /tmp/tmp.3Psyzp36ju Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): 
error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.fI8VThFIPB + cat /tmp/tmp.3Psyzp36ju Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.fI8VThFIPB /tmp/tmp.3Psyzp36ju + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace mongod-major-upgrade-sharded-2121 + rm -rf /tmp/tmp.Tm1qJlhCiF ++ mktemp + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.XzTPpcIAGF + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.YkBe4zUCgL ++ mktemp + local LAST_ERR=/tmp/tmp.N4dS4zGDHm + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.SeNKHluXu4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace mongod-major-upgrade-sharded-2121 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator