++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/logs/mongod-major-upgrade-sharded.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/logs/mongod-major-upgrade-sharded.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ '[' v1.26.15-gke.1390000 ']' ++ GKE=1 +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.26 + set_debug + [[ 1 == 1 ]] + set -o xtrace + main + rbac=rbac + '[' -n psmdb-operator ']' + rbac=cw-rbac + create_infra mongod-major-upgrade-sharded-18692 + local ns=mongod-major-upgrade-sharded-18692 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.fnse97Azbi ++ mktemp + local LAST_ERR=/tmp/tmp.DN4ggqgH1F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fnse97Azbi customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.DN4ggqgH1F + rm /tmp/tmp.fnse97Azbi /tmp/tmp.DN4ggqgH1F + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XY5If0IsqD ++ mktemp + local LAST_ERR=/tmp/tmp.zsIY4CxI9w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd 
perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XY5If0IsqD + cat /tmp/tmp.zsIY4CxI9w + rm /tmp/tmp.XY5If0IsqD /tmp/tmp.zsIY4CxI9w + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.klzGlF7yDg ++ mktemp + local LAST_ERR=/tmp/tmp.84oq8w8lEz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.klzGlF7yDg + cat /tmp/tmp.84oq8w8lEz + rm /tmp/tmp.klzGlF7yDg /tmp/tmp.84oq8w8lEz + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.my4vh3LrHn ++ mktemp + local LAST_ERR=/tmp/tmp.oJm9lrYNW6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.my4vh3LrHn + cat /tmp/tmp.oJm9lrYNW6 + rm /tmp/tmp.my4vh3LrHn /tmp/tmp.oJm9lrYNW6 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.nC5dai9w9e ++ mktemp + local LAST_ERR=/tmp/tmp.4h9Poi2DOO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nC5dai9w9e clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.4h9Poi2DOO + rm /tmp/tmp.nC5dai9w9e /tmp/tmp.4h9Poi2DOO + return 0 + check_crd_for_deletion PR-1568-77a673e0 + local git_tag=PR-1568-77a673e0 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1568-77a673e0/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ 
/usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2EMqk3PQMe +++ mktemp ++ local LAST_ERR=/tmp/tmp.9ThZdc62Se ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.2EMqk3PQMe ++ cat /tmp/tmp.9ThZdc62Se Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.2EMqk3PQMe ++ cat /tmp/tmp.9ThZdc62Se Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.2EMqk3PQMe ++ cat /tmp/tmp.9ThZdc62Se Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.2EMqk3PQMe ++ cat /tmp/tmp.9ThZdc62Se Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.2EMqk3PQMe /tmp/tmp.9ThZdc62Se ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- 
cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.B6NoWvo9up ++ mktemp + local LAST_OUT=/tmp/tmp.HeFE8dS4QF ++ mktemp + local LAST_ERR=/tmp/tmp.Um7J5AYqR5 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.SbW2fsbCOb + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HeFE8dS4QF + cat /tmp/tmp.SbW2fsbCOb + rm /tmp/tmp.HeFE8dS4QF /tmp/tmp.SbW2fsbCOb + return 0 namespace "mongod-major-upgrade-sharded-1830" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B6NoWvo9up namespace "psmdb-operator" deleted + cat /tmp/tmp.Um7J5AYqR5 + rm /tmp/tmp.B6NoWvo9up /tmp/tmp.Um7J5AYqR5 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.PtAL6nWnlg ++ mktemp + local LAST_ERR=/tmp/tmp.FBIpxOvqui + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PtAL6nWnlg + cat /tmp/tmp.FBIpxOvqui + rm /tmp/tmp.PtAL6nWnlg /tmp/tmp.FBIpxOvqui + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.jBB1DnX5F6 ++ mktemp + local LAST_ERR=/tmp/tmp.SkYSUZSL02 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jBB1DnX5F6 namespace/psmdb-operator created + cat /tmp/tmp.SkYSUZSL02 + rm /tmp/tmp.jBB1DnX5F6 /tmp/tmp.SkYSUZSL02 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.5cUlI5GeW0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.enf4AdiLlZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5cUlI5GeW0 ++ cat /tmp/tmp.enf4AdiLlZ ++ rm /tmp/tmp.5cUlI5GeW0 /tmp/tmp.enf4AdiLlZ ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1568-77a673e0-1-cluster3 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.BtJ9V7Eu9J ++ mktemp + local LAST_ERR=/tmp/tmp.3GeZARQYYm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1568-77a673e0-1-cluster3 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 
0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BtJ9V7Eu9J Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1568-77a673e0-1-cluster3" modified. + cat /tmp/tmp.3GeZARQYYm + rm /tmp/tmp.BtJ9V7Eu9J /tmp/tmp.3GeZARQYYm + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.5P5RLmm9XC ++ mktemp + local LAST_ERR=/tmp/tmp.yc0VwKZ4rt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5P5RLmm9XC customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.yc0VwKZ4rt + rm /tmp/tmp.5P5RLmm9XC /tmp/tmp.yc0VwKZ4rt + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.wvsaHKTcE7 ++ mktemp + local LAST_ERR=/tmp/tmp.3RHcWlDWME + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wvsaHKTcE7 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.3RHcWlDWME + rm /tmp/tmp.wvsaHKTcE7 /tmp/tmp.3RHcWlDWME + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1568-77a673e0") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RoDtcNfuUW ++ mktemp + local LAST_ERR=/tmp/tmp.DzgXAUB2w3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RoDtcNfuUW deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.DzgXAUB2w3 + rm /tmp/tmp.RoDtcNfuUW /tmp/tmp.DzgXAUB2w3 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.0BfFvRad3T +++ mktemp ++ local LAST_ERR=/tmp/tmp.k9s5YwuE6M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0BfFvRad3T ++ cat /tmp/tmp.k9s5YwuE6M ++ rm /tmp/tmp.0BfFvRad3T /tmp/tmp.k9s5YwuE6M ++ return 0 + wait_pod percona-server-mongodb-operator-7854db4b69-lx8sd + local pod=percona-server-mongodb-operator-7854db4b69-lx8sd + set +o xtrace waiting for pod/percona-server-mongodb-operator-7854db4b69-lx8sd to be ready..OK + create_namespace mongod-major-upgrade-sharded-18692 + local namespace=mongod-major-upgrade-sharded-18692 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + 
desc 'cleaned up old namespaces mongod-major-upgrade-sharded-18692' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces mongod-major-upgrade-sharded-18692 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace mongod-major-upgrade-sharded-18692 --ignore-not-found ++ mktemp + kubectl_bin get ns + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.5xM5xouKKP ++ mktemp + local LAST_ERR=/tmp/tmp.HFaAwUuyq8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_OUT=/tmp/tmp.MLUkNhotjt + xargs kubectl delete ns ++ mktemp + local LAST_ERR=/tmp/tmp.Z1HvlwjEmX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace mongod-major-upgrade-sharded-18692 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5xM5xouKKP + cat /tmp/tmp.HFaAwUuyq8 + rm /tmp/tmp.5xM5xouKKP /tmp/tmp.HFaAwUuyq8 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MLUkNhotjt + cat /tmp/tmp.Z1HvlwjEmX + rm /tmp/tmp.MLUkNhotjt /tmp/tmp.Z1HvlwjEmX + return 0 + kubectl_bin wait --for=delete namespace mongod-major-upgrade-sharded-18692 ++ mktemp + local LAST_OUT=/tmp/tmp.jTQMSrTbiC ++ mktemp + local LAST_ERR=/tmp/tmp.mPdVZbjim4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace mongod-major-upgrade-sharded-18692 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jTQMSrTbiC + cat /tmp/tmp.mPdVZbjim4 + rm /tmp/tmp.jTQMSrTbiC /tmp/tmp.mPdVZbjim4 + return 0 + desc 'create namespace mongod-major-upgrade-sharded-18692' + set +o xtrace ----------------------------------------------------------------------------------- create namespace mongod-major-upgrade-sharded-18692 ----------------------------------------------------------------------------------- + kubectl_bin create namespace mongod-major-upgrade-sharded-18692 ++ mktemp + local LAST_OUT=/tmp/tmp.y0agtxynjS ++ mktemp + local LAST_ERR=/tmp/tmp.1NtgRIbv0w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace mongod-major-upgrade-sharded-18692 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y0agtxynjS namespace/mongod-major-upgrade-sharded-18692 created + cat /tmp/tmp.1NtgRIbv0w + rm /tmp/tmp.y0agtxynjS /tmp/tmp.1NtgRIbv0w + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.WKm5jf4aTW +++ mktemp ++ local LAST_ERR=/tmp/tmp.g1vgzc6R3C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WKm5jf4aTW ++ cat /tmp/tmp.g1vgzc6R3C ++ rm /tmp/tmp.WKm5jf4aTW /tmp/tmp.g1vgzc6R3C ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1568-77a673e0-1-cluster3 --namespace=mongod-major-upgrade-sharded-18692 ++ mktemp + local LAST_OUT=/tmp/tmp.7MTcs3aeNu ++ mktemp + local LAST_ERR=/tmp/tmp.S0LueOenau + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1568-77a673e0-1-cluster3 --namespace=mongod-major-upgrade-sharded-18692 + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7MTcs3aeNu Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1568-77a673e0-1-cluster3" modified. + cat /tmp/tmp.S0LueOenau + rm /tmp/tmp.7MTcs3aeNu /tmp/tmp.S0LueOenau + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Tm9TvfzSXZ ++ mktemp + local LAST_ERR=/tmp/tmp.hcYrEs4fus + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tm9TvfzSXZ secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.hcYrEs4fus + rm /tmp/tmp.Tm9TvfzSXZ /tmp/tmp.hcYrEs4fus + return 0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.byfFrufpsj ++ mktemp + local LAST_ERR=/tmp/tmp.Cwn8eyVnf5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.byfFrufpsj deployment.apps/psmdb-client created secret/some-users created + cat /tmp/tmp.Cwn8eyVnf5 + rm /tmp/tmp.byfFrufpsj /tmp/tmp.Cwn8eyVnf5 + return 0 + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + cp /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.dep.json /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.dep.json + generate_vs_json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.json /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.json + local template_path=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.json + local target_path=/tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.json ++ jq '.versions[0].operator="1.17.0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/operator.main.psmdb-operator.json + local 'version_service_source={ "versions": [ { "operator": "1.17.0", "product": "psmdb-operator", "matrix": { "mongod": {}, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, 
"operator": {} } } ] }' + for image_mongod in '${IMAGE_MONGOD_CHAIN[@]}' ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod5.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod5.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod5.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod5.0 +++ local 'cli=mongod --version' +++ local pod_name=22683 +++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ kubectl_bin -n default run 22683 --image=perconalab/percona-server-mongodb-operator:main-mongod5.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oUR5JczlW1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.we8hL3TGN4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default run 22683 --image=perconalab/percona-server-mongodb-operator:main-mongod5.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oUR5JczlW1 +++ cat /tmp/tmp.we8hL3TGN4 +++ rm /tmp/tmp.oUR5JczlW1 /tmp/tmp.we8hL3TGN4 +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/22683 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PD4dZYY7cU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JM4BXwCNrv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/22683 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PD4dZYY7cU +++ cat /tmp/tmp.JM4BXwCNrv +++ rm /tmp/tmp.PD4dZYY7cU /tmp/tmp.JM4BXwCNrv +++ return 0 ++++ kubectl_bin -n default exec 22683 -- mongod --version +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.pezyQwPlm1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6zLo7sjBHR ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default exec 22683 -- mongod --version ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.pezyQwPlm1 ++++ cat /tmp/tmp.6zLo7sjBHR ++++ rm /tmp/tmp.pezyQwPlm1 /tmp/tmp.6zLo7sjBHR ++++ return 0 +++ local 'output=db version v5.0.26-22 Build Info: { "version": "5.0.26-22", "gitVersion": "565331dec31cc2affadd67913bef70906134aedc", "openSSLVersion": "OpenSSL 1.1.1k FIPS 25 Mar 2021", "modules": [], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/22683 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yYBI5ca0oe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.T0D7wH0vum +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default delete pod/22683 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yYBI5ca0oe +++ cat /tmp/tmp.T0D7wH0vum Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
+++ rm /tmp/tmp.yYBI5ca0oe /tmp/tmp.T0D7wH0vum +++ return 0 +++ echo db version v5.0.26-22 Build Info: '{' '"version":' '"5.0.26-22",' '"gitVersion":' '"565331dec31cc2affadd67913bef70906134aedc",' '"openSSLVersion":' '"OpenSSL' 1.1.1k FIPS 25 Mar '2021",' '"modules":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=5.0.26-22 ++ [[ ! 5.0.26-22 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 5.0.26-22 + current_mongod_version=5.0.26-22 ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.17.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' ++ jq '.versions[0].matrix.mongod += {"5.0.26-22": {"image_path":"perconalab/percona-server-mongodb-operator:main-mongod5.0","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.17.0", "product": "psmdb-operator", "matrix": { "mongod": { "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, "operator": {} } } ] }' + for image_mongod in '${IMAGE_MONGOD_CHAIN[@]}' ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod6.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod6.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod6.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod6.0 +++ local 'cli=mongod --version' +++ local pod_name=28149 +++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ kubectl_bin -n default run 28149 --image=perconalab/percona-server-mongodb-operator:main-mongod6.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YSV3LiPjlQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XvEQvW2Uh4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default run 28149 --image=perconalab/percona-server-mongodb-operator:main-mongod6.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YSV3LiPjlQ +++ cat /tmp/tmp.XvEQvW2Uh4 +++ rm /tmp/tmp.YSV3LiPjlQ /tmp/tmp.XvEQvW2Uh4 +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/28149 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GS6d1vpDef ++++ mktemp +++ local LAST_ERR=/tmp/tmp.etN5TQ0Bzd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/28149 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GS6d1vpDef +++ cat /tmp/tmp.etN5TQ0Bzd +++ rm /tmp/tmp.GS6d1vpDef /tmp/tmp.etN5TQ0Bzd +++ return 0 ++++ kubectl_bin -n default exec 28149 -- mongod --version +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nDUlxEmMLc +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.F4xdqAAiPc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default exec 28149 -- mongod 
--version ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.nDUlxEmMLc ++++ cat /tmp/tmp.F4xdqAAiPc ++++ rm /tmp/tmp.nDUlxEmMLc /tmp/tmp.F4xdqAAiPc ++++ return 0 +++ local 'output=db version v6.0.15-12 Build Info: { "version": "6.0.15-12", "gitVersion": "2c4ff0c994742506096fae92dc182d61380c2854", "openSSLVersion": "OpenSSL 1.1.1k FIPS 25 Mar 2021", "modules": [], "proFeatures": [], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/28149 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6x7TXyHgWJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uQcOgJbEIr +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default delete pod/28149 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6x7TXyHgWJ +++ cat /tmp/tmp.uQcOgJbEIr Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.6x7TXyHgWJ /tmp/tmp.uQcOgJbEIr +++ return 0 +++ echo db version v6.0.15-12 Build Info: '{' '"version":' '"6.0.15-12",' '"gitVersion":' '"2c4ff0c994742506096fae92dc182d61380c2854",' '"openSSLVersion":' '"OpenSSL' 1.1.1k FIPS 25 Mar '2021",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=6.0.15-12 ++ [[ ! 6.0.15-12 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 6.0.15-12 + current_mongod_version=6.0.15-12 ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.17.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' ++ jq '.versions[0].matrix.mongod += {"6.0.15-12": {"image_path":"perconalab/percona-server-mongodb-operator:main-mongod6.0","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.17.0", "product": "psmdb-operator", "matrix": { "mongod": { "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" }, "6.0.15-12": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, "operator": {} } } ] }' + for image_mongod in '${IMAGE_MONGOD_CHAIN[@]}' ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ local 'cli=mongod --version' +++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ local pod_name=24148 +++ kubectl_bin -n default run 24148 
--image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FNfarGJH68 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XYo32BzqWA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default run 24148 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FNfarGJH68 +++ cat /tmp/tmp.XYo32BzqWA +++ rm /tmp/tmp.FNfarGJH68 /tmp/tmp.XYo32BzqWA +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/24148 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TSf1JskkUv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3whRjrur2A +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/24148 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TSf1JskkUv +++ cat /tmp/tmp.3whRjrur2A +++ rm /tmp/tmp.TSf1JskkUv /tmp/tmp.3whRjrur2A +++ return 0 ++++ kubectl_bin -n default exec 24148 -- mongod --version +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zs78OZeAqE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.PSSZ8CRhD2 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default exec 24148 -- mongod --version ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.zs78OZeAqE ++++ cat /tmp/tmp.PSSZ8CRhD2 ++++ rm /tmp/tmp.zs78OZeAqE /tmp/tmp.PSSZ8CRhD2 ++++ return 0 +++ local 'output=db version v7.0.11-6 Build Info: { "version": "7.0.11-6", "gitVersion": "0818dfdcc4349542e73dd63f56ab35f92498a115", "openSSLVersion": "OpenSSL 1.1.1k FIPS 25 Mar 2021", "modules": [], "proFeatures": [], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/24148 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YrbJMCEu3w ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fEo6czhXjt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default delete pod/24148 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YrbJMCEu3w +++ cat /tmp/tmp.fEo6czhXjt Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.YrbJMCEu3w /tmp/tmp.fEo6czhXjt +++ return 0 +++ echo db version v7.0.11-6 Build Info: '{' '"version":' '"7.0.11-6",' '"gitVersion":' '"0818dfdcc4349542e73dd63f56ab35f92498a115",' '"openSSLVersion":' '"OpenSSL' 1.1.1k FIPS 25 Mar '2021",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=7.0.11-6 ++ [[ ! 
7.0.11-6 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 7.0.11-6 + current_mongod_version=7.0.11-6 ++ jq '.versions[0].matrix.mongod += {"7.0.11-6": {"image_path":"perconalab/percona-server-mongodb-operator:main-mongod7.0","status":"recommended"}}' ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.17.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '},' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' + version_service_source='{ "versions": [ { "operator": "1.17.0", "product": "psmdb-operator", "matrix": { "mongod": { "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" }, "6.0.15-12": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.11-6": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": {}, "operator": {} } } ] }' ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.17.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '},' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{},' '"operator":' '{}' '}' '}' ']' '}' +++ get_pbm_version perconalab/percona-server-mongodb-operator:main-backup +++ local image=perconalab/percona-server-mongodb-operator:main-backup ++++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-backup 'pbm-agent version' ++++ local image=perconalab/percona-server-mongodb-operator:main-backup ++++ /usr/bin/sed -r 's/^Version:\ (([0-9]+\.){2}[0-9]+)\ .*/\1/g' ++++ local 'cli=pbm-agent version' ++++ local pod_name=13545 ++++ kubectl_bin -n default run 13545 --image=perconalab/percona-server-mongodb-operator:main-backup --restart=Never --command -- sleep infinity +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Qj47A9LMlq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4TYfo0r9vM ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default run 13545 --image=perconalab/percona-server-mongodb-operator:main-backup --restart=Never --command -- sleep infinity ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Qj47A9LMlq ++++ cat /tmp/tmp.4TYfo0r9vM ++++ rm /tmp/tmp.Qj47A9LMlq /tmp/tmp.4TYfo0r9vM ++++ return 0 ++++ 
kubectl_bin -n default wait --for=condition=Ready pod/13545 +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.C279kLN6ye +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.S7X7PmrkFQ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default wait --for=condition=Ready pod/13545 ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.C279kLN6ye ++++ cat /tmp/tmp.S7X7PmrkFQ ++++ rm /tmp/tmp.C279kLN6ye /tmp/tmp.S7X7PmrkFQ ++++ return 0 +++++ kubectl_bin -n default exec 13545 -- pbm-agent version ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.YHEhdNSwYt ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.mUvsZ4Owst +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl -n default exec 13545 -- pbm-agent version +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 -a -n 1 ']' +++++ break +++++ cat /tmp/tmp.YHEhdNSwYt +++++ cat /tmp/tmp.mUvsZ4Owst +++++ rm /tmp/tmp.YHEhdNSwYt /tmp/tmp.mUvsZ4Owst +++++ return 0 ++++ local 'output=Version: 2.5.0 Platform: linux/amd64 GitCommit: 0d1a4ce8f0471ca3fc6ed13f5a5ed9986990dad1 GitBranch: release-2.5.0 BuildTime: 2024-05-15_10:10_UTC GoVersion: go1.22.3' ++++ kubectl_bin -n default delete pod/13545 --grace-period=0 --force +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.4plDf8FqQM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.O2ms8BNCLD ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default delete pod/13545 --grace-period=0 --force ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.4plDf8FqQM ++++ cat /tmp/tmp.O2ms8BNCLD Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. ++++ rm /tmp/tmp.4plDf8FqQM /tmp/tmp.O2ms8BNCLD ++++ return 0 ++++ echo Version: 2.5.0 Platform: linux/amd64 GitCommit: 0d1a4ce8f0471ca3fc6ed13f5a5ed9986990dad1 GitBranch: release-2.5.0 BuildTime: 2024-05-15_10:10_UTC GoVersion: go1.22.3 +++ local version_info=2.5.0 +++ [[ ! 
2.5.0 =~ ^([0-9]+\.){2}[0-9]+$ ]] +++ echo 2.5.0 ++ jq '.versions[0].matrix.backup += {"2.5.0": {"image_path":"perconalab/percona-server-mongodb-operator:main-backup","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.17.0", "product": "psmdb-operator", "matrix": { "mongod": { "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" }, "6.0.15-12": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.11-6": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": { "2.5.0": { "image_path": "perconalab/percona-server-mongodb-operator:main-backup", "status": "recommended" } }, "operator": {} } } ] }' ++ echo '{' '"versions":' '[' '{' '"operator":' '"1.17.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '},' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{' '"2.5.0":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-backup",' '"status":' '"recommended"' '}' '},' '"operator":' '{}' '}' '}' ']' '}' ++ jq '.versions[0].matrix.operator += {"1.17.0": {"image_path":"perconalab/percona-server-mongodb-operator:PR-1568-77a673e0","status":"recommended"}}' + version_service_source='{ "versions": [ { "operator": "1.17.0", "product": "psmdb-operator", "matrix": { "mongod": { "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" }, "6.0.15-12": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.11-6": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } }, "pmm": { "2.27.0": { "image_path": "percona/pmm-client:2.27.0", "image_hash": "4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884", "status": "recommended", "critical": false } }, "backup": { "2.5.0": { "image_path": "perconalab/percona-server-mongodb-operator:main-backup", "status": "recommended" } }, "operator": { "1.17.0": { "image_path": "perconalab/percona-server-mongodb-operator:PR-1568-77a673e0", "status": "recommended" } } } } ] }' + jq . 
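The final `jq .` pretty-prints the fully merged payload; the echo that follows feeds it in, and the result lands in /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.json, the file the versions configmap is built from below. A condensed sketch of how generate_vs_json accumulates that payload; the jq filters are quoted from this trace, while the loop shape, local names, and the final redirect are a reconstruction:

    generate_vs_json() {
        local template_path=$1 target_path=$2
        # pin the operator version in the template
        local src
        src=$(jq '.versions[0].operator="1.17.0"' "$template_path")
        # probe each mongod image and merge its version into the matrix
        for image in "${IMAGE_MONGOD_CHAIN[@]}"; do
            local ver
            ver=$(get_mongod_ver_from_image "$image")   # e.g. 5.0.26-22
            src=$(echo "$src" | jq ".versions[0].matrix.mongod += {\"$ver\": {\"image_path\": \"$image\", \"status\": \"recommended\"}}")
        done
        # the backup (pbm-agent) and operator entries are merged the same way
        echo "$src" | jq . > "$target_path"
    }

With the configmap in place, vs.yml is applied with its operator.9.9.9.psmdb-operator key rewritten by sed to operator.1.17.0.psmdb-operator, so the fake version service serves exactly this matrix.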
+ echo '{' '"versions":' '[' '{' '"operator":' '"1.17.0",' '"product":' '"psmdb-operator",' '"matrix":' '{' '"mongod":' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '},' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '},' '"pmm":' '{' '"2.27.0":' '{' '"image_path":' '"percona/pmm-client:2.27.0",' '"image_hash":' '"4f4d4508afe4ef3b5d72e2ebec6485be84204902b0b2b23c3a9e7c2fe4726884",' '"status":' '"recommended",' '"critical":' false '}' '},' '"backup":' '{' '"2.5.0":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-backup",' '"status":' '"recommended"' '}' '},' '"operator":' '{' '"1.17.0":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:PR-1568-77a673e0",' '"status":' '"recommended"' '}' '}' '}' '}' ']' '}' + kubectl_bin create configmap -n psmdb-operator versions --from-file /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.dep.json --from-file /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.rXb3p06TFK ++ mktemp + local LAST_ERR=/tmp/tmp.VLDDBWEAxE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create configmap -n psmdb-operator versions --from-file /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.dep.json --from-file /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rXb3p06TFK configmap/versions created + cat /tmp/tmp.VLDDBWEAxE + rm /tmp/tmp.rXb3p06TFK /tmp/tmp.VLDDBWEAxE + return 0 + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/vs.yml + local LAST_OUT=/tmp/tmp.c1nRURqn1Z ++ mktemp + local LAST_ERR=/tmp/tmp.4a9S1ylPoj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + /usr/bin/sed -r s#operator.9.9.9.psmdb-operator#operator.1.17.0.psmdb-operator#g + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c1nRURqn1Z deployment.apps/version-service created service/version-service created + cat /tmp/tmp.4a9S1ylPoj + rm /tmp/tmp.c1nRURqn1Z /tmp/tmp.4a9S1ylPoj + return 0 ++ jq '.[] | .[] |.matrix.mongod' /tmp/tmp.JZLX6ng15v/operator.1.17.0.psmdb-operator.json + version_matrix='{ "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" }, "6.0.15-12": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.11-6": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } }' ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0 +++ local 'cli=mongod --version' +++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ local pod_name=16330 +++ kubectl_bin -n default run 16330 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 
--restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lhve0IlKgD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nSGv61jFQb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default run 16330 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lhve0IlKgD +++ cat /tmp/tmp.nSGv61jFQb +++ rm /tmp/tmp.lhve0IlKgD /tmp/tmp.nSGv61jFQb +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/16330 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xcRDzosUWF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.orSFxE9gR2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/16330 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xcRDzosUWF +++ cat /tmp/tmp.orSFxE9gR2 +++ rm /tmp/tmp.xcRDzosUWF /tmp/tmp.orSFxE9gR2 +++ return 0 ++++ kubectl_bin -n default exec 16330 -- mongod --version +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.IOAXwpXfll +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bPhAbjqFVM ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl -n default exec 16330 -- mongod --version ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.IOAXwpXfll ++++ cat /tmp/tmp.bPhAbjqFVM ++++ rm /tmp/tmp.IOAXwpXfll /tmp/tmp.bPhAbjqFVM ++++ return 0 +++ local 'output=db version v7.0.11-6 Build Info: { "version": "7.0.11-6", "gitVersion": "0818dfdcc4349542e73dd63f56ab35f92498a115", "openSSLVersion": "OpenSSL 1.1.1k FIPS 25 Mar 2021", "modules": [], "proFeatures": [], "allocator": "tcmalloc", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/16330 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DwMtb5Wtlw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.O6pXSnzjCB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl -n default delete pod/16330 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DwMtb5Wtlw +++ cat /tmp/tmp.O6pXSnzjCB Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.DwMtb5Wtlw /tmp/tmp.O6pXSnzjCB +++ return 0 +++ echo db version v7.0.11-6 Build Info: '{' '"version":' '"7.0.11-6",' '"gitVersion":' '"0818dfdcc4349542e73dd63f56ab35f92498a115",' '"openSSLVersion":' '"OpenSSL' 1.1.1k FIPS 25 Mar '2021",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=7.0.11-6 ++ [[ ! 7.0.11-6 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 7.0.11-6 + current_mongod_version=7.0.11-6 ++ echo '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '},' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '}' ++ jq '. 
+= {"7.0.11-6":{"image_path":"perconalab/percona-server-mongodb-operator:main-mongod7.0","status": "recommended"}}' + version_matrix='{ "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" }, "6.0.15-12": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" }, "7.0.11-6": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } }' ++ echo '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '},' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '},' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '}' ++ jq 'to_entries | sort_by( .key | split("[[:punct:]]";"g") | map(tonumber) ) | map({(.key): .value}) ' + version_matrix='[ { "5.0.26-22": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod5.0", "status": "recommended" } }, { "6.0.15-12": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod6.0", "status": "recommended" } }, { "7.0.11-6": { "image_path": "perconalab/percona-server-mongodb-operator:main-mongod7.0", "status": "recommended" } } ]' ++ echo '[' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '}' '},' '{' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '{' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '}' ']' ++ jq -r '.[] | keys | .[] | split(".") | .[:2] | join(".")' ++ uniq ++ tail -n +2 + versions_to_verify='6.0 7.0' + cluster=some-name ++ jq '.[0] | keys | .[0]' ++ echo '[' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '}' '},' '{' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '{' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '}' ']' + desc 'Starting the cluster with IMAGE_MONGOD "5.0.26-22"' + set +o xtrace ----------------------------------------------------------------------------------- Starting the cluster with IMAGE_MONGOD "5.0.26-22" ----------------------------------------------------------------------------------- ++ echo '[' '{' '"5.0.26-22":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod5.0",' '"status":' '"recommended"' '}' '},' '{' '"6.0.15-12":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod6.0",' '"status":' '"recommended"' '}' '},' '{' '"7.0.11-6":' '{' '"image_path":' '"perconalab/percona-server-mongodb-operator:main-mongod7.0",' '"status":' '"recommended"' '}' '}' ']' ++ jq -r 'to_entries | .[0].value | .[].image_path' + export IMAGE_MONGOD=perconalab/percona-server-mongodb-operator:main-mongod5.0 + IMAGE_MONGOD=perconalab/percona-server-mongodb-operator:main-mongod5.0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/some-name.yml + '[' -z '' ']' + cat_config 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/some-name.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/conf/some-name.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod5.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1568-77a673e0"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.QvWwrtCX4n ++ mktemp + local LAST_ERR=/tmp/tmp.7mNWwIfEdq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QvWwrtCX4n perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.7mNWwIfEdq + rm /tmp/tmp.QvWwrtCX4n /tmp/tmp.7mNWwIfEdq + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BAjB2u8CiS +++ mktemp ++ local LAST_ERR=/tmp/tmp.K5hI95ZtzH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BAjB2u8CiS ++ cat /tmp/tmp.K5hI95ZtzH ++ rm /tmp/tmp.BAjB2u8CiS /tmp/tmp.K5hI95ZtzH ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready..............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sQVSUHvfqm +++ mktemp ++ local LAST_ERR=/tmp/tmp.hE8H6pNR4s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sQVSUHvfqm ++ cat /tmp/tmp.hE8H6pNR4s ++ rm /tmp/tmp.sQVSUHvfqm /tmp/tmp.hE8H6pNR4s ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................ 
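The versions_to_verify='6.0 7.0' list traced above comes from sorting the version matrix numerically and dropping its lowest entry, which is the version the cluster was just deployed with. A minimal standalone sketch of that jq pipeline, with the image_path/status fields trimmed to empty objects for brevity:

    echo '{"5.0.26-22":{},"7.0.11-6":{},"6.0.15-12":{}}' \
        | jq 'to_entries | sort_by(.key | split("[[:punct:]]";"g") | map(tonumber)) | map({(.key): .value})' \
        | jq -r '.[] | keys | .[] | split(".") | .[:2] | join(".")' \
        | uniq | tail -n +2
    # prints the upgrade targets, oldest first:
    # 6.0
    # 7.0

split("[[:punct:]]";"g") turns "5.0.26-22" into [5,0,26,22], so sort_by orders by the full numeric version rather than by string, and tail -n +2 discards the 5.0 line the cluster starts on. The same wait-for-running sequence just completed for the cfg replica set now repeats for rs0 and mongos below.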
+ wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1H4Qs1hAgc +++ mktemp ++ local LAST_ERR=/tmp/tmp.J3eluAinyl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1H4Qs1hAgc ++ cat /tmp/tmp.J3eluAinyl ++ rm /tmp/tmp.1H4Qs1hAgc /tmp/tmp.J3eluAinyl ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nIGZza1LLX +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ip9SWwcSDO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nIGZza1LLX ++ cat /tmp/tmp.Ip9SWwcSDO ++ rm /tmp/tmp.nIGZza1LLX /tmp/tmp.Ip9SWwcSDO ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JRY1R0qXNe +++ mktemp ++ local LAST_ERR=/tmp/tmp.QghJCibRpL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JRY1R0qXNe ++ cat /tmp/tmp.QghJCibRpL ++ rm /tmp/tmp.JRY1R0qXNe /tmp/tmp.QghJCibRpL ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RJzQtJTlwD +++ mktemp ++ local LAST_ERR=/tmp/tmp.3FU72fG9o8 ++ local exit_status=0 ++ local timeout=4 
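# The mktemp/LAST_OUT/LAST_ERR/exit_status boilerplate surrounding every
# kubectl call in this trace (including the non_voting check in progress
# here) is the suite's kubectl_bin retry wrapper. A plausible reconstruction
# inferred from the traced statements only; the actual helper source may
# differ in how it reports failures:
#
#   kubectl_bin() {
#       local LAST_OUT LAST_ERR
#       LAST_OUT=$(mktemp)
#       LAST_ERR=$(mktemp)
#       local exit_status=0
#       local timeout=4
#       for i in $(seq 0 2); do
#           set +e
#           kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
#           exit_status=$?
#           set -e
#           if [ "$exit_status" -eq 0 ]; then
#               break
#           fi
#           sleep "$timeout"
#       done
#       cat "$LAST_OUT"
#       cat "$LAST_ERR"
#       rm "$LAST_OUT" "$LAST_ERR"
#       return "$exit_status"
#   }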
+++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RJzQtJTlwD ++ cat /tmp/tmp.3FU72fG9o8 ++ rm /tmp/tmp.RJzQtJTlwD /tmp/tmp.3FU72fG9o8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0y0XQanGhy +++ mktemp ++ local LAST_ERR=/tmp/tmp.NcAnZTeVzI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0y0XQanGhy ++ cat /tmp/tmp.NcAnZTeVzI ++ rm /tmp/tmp.0y0XQanGhy /tmp/tmp.NcAnZTeVzI ++ return 0 + [[ ready == \r\e\a\d\y ]] + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-18692 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-18692 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hyuZ3HuZVw +++ mktemp ++ local LAST_ERR=/tmp/tmp.iSbyMdS3oT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hyuZ3HuZVw ++ cat /tmp/tmp.iSbyMdS3oT ++ rm /tmp/tmp.hyuZ3HuZVw /tmp/tmp.iSbyMdS3oT ++ return 0 + local client_container=psmdb-client-7469665986-9222t + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.0lOASYD9uU ++ mktemp + local LAST_ERR=/tmp/tmp.SvHTw9zna6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0lOASYD9uU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("0007f254-6996-491f-99cf-ddf1c777aa69") } Percona Server for MongoDB server 
version: v5.0.26-22 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.SvHTw9zna6 + rm /tmp/tmp.0lOASYD9uU /tmp/tmp.SvHTw9zna6 + return 0 + sleep 2 + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jAp1kl0u9w +++ mktemp ++ local LAST_ERR=/tmp/tmp.79oyHYfQdb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jAp1kl0u9w ++ cat /tmp/tmp.79oyHYfQdb ++ rm /tmp/tmp.jAp1kl0u9w /tmp/tmp.79oyHYfQdb ++ return 0 + local client_container=psmdb-client-7469665986-9222t + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.tQv7gAN7T0 ++ mktemp + local LAST_ERR=/tmp/tmp.RAfv9uItUD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tQv7gAN7T0 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("aceca1f5-e120-4465-84f5-1fc488f58d5d") } Percona Server for MongoDB server version: v5.0.26-22 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RAfv9uItUD + rm /tmp/tmp.tQv7gAN7T0 /tmp/tmp.RAfv9uItUD + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local command=find + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local postfix= + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sGXg364CUz +++ mktemp ++ local LAST_ERR=/tmp/tmp.AskdZ3efOA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 
2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sGXg364CUz ++ cat /tmp/tmp.AskdZ3efOA ++ rm /tmp/tmp.sGXg364CUz /tmp/tmp.AskdZ3efOA ++ return 0 + local client_container=psmdb-client-7469665986-9222t + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.o7MGhxPn0z ++ mktemp + local LAST_ERR=/tmp/tmp.y42K8Ee6Xb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o7MGhxPn0z + cat /tmp/tmp.y42K8Ee6Xb + rm /tmp/tmp.o7MGhxPn0z /tmp/tmp.y42K8Ee6Xb + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/compare/find.json /tmp/tmp.JZLX6ng15v/find + desc 'Starting to follow mongod upgrade images chain' + set +o xtrace ----------------------------------------------------------------------------------- Starting to follow mongod upgrade images chain ----------------------------------------------------------------------------------- + target_generation=2 + for version in '${versions_to_verify[@]}' + desc 'Testing upgrade to version: 6.0' + set +o xtrace ----------------------------------------------------------------------------------- Testing upgrade to version: 6.0 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/apply","value": "6.0-recommended"}, ]' ++ mktemp + local LAST_OUT=/tmp/tmp.I89qjfzups ++ mktemp + local LAST_ERR=/tmp/tmp.PccdwGK7iU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/apply","value": "6.0-recommended"}, ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I89qjfzups perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.PccdwGK7iU + rm /tmp/tmp.I89qjfzups /tmp/tmp.PccdwGK7iU + return 0 + sleep 70 + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U1PBQ6Mwg6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rEZTbFKSFh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U1PBQ6Mwg6 ++ cat /tmp/tmp.rEZTbFKSFh ++ rm /tmp/tmp.U1PBQ6Mwg6 /tmp/tmp.rEZTbFKSFh ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uif5wwCN0D +++ mktemp ++ local LAST_ERR=/tmp/tmp.sIQVfE7Xuc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uif5wwCN0D ++ cat /tmp/tmp.sIQVfE7Xuc ++ rm /tmp/tmp.uif5wwCN0D /tmp/tmp.sIQVfE7Xuc ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................................................................ + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XUiXjdlp4U +++ mktemp ++ local LAST_ERR=/tmp/tmp.QOqnLB6ZxK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XUiXjdlp4U ++ cat /tmp/tmp.QOqnLB6ZxK ++ rm /tmp/tmp.XUiXjdlp4U /tmp/tmp.QOqnLB6ZxK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8uUOD2GwAy +++ mktemp ++ local LAST_ERR=/tmp/tmp.j7qbRToSoi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8uUOD2GwAy ++ cat /tmp/tmp.j7qbRToSoi ++ rm /tmp/tmp.8uUOD2GwAy /tmp/tmp.j7qbRToSoi ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for 
pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ccn5laLZk5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7vsj9WcYdT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ccn5laLZk5 ++ cat /tmp/tmp.7vsj9WcYdT ++ rm /tmp/tmp.Ccn5laLZk5 /tmp/tmp.7vsj9WcYdT ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.odOoVHYe03 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2gCZEKUw70 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.odOoVHYe03 ++ cat /tmp/tmp.2gCZEKUw70 ++ rm /tmp/tmp.odOoVHYe03 /tmp/tmp.2gCZEKUw70 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Sj393QRxrL +++ mktemp ++ local LAST_ERR=/tmp/tmp.xHrDxZAkcE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Sj393QRxrL ++ cat /tmp/tmp.xHrDxZAkcE ++ rm /tmp/tmp.Sj393QRxrL /tmp/tmp.xHrDxZAkcE ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_generation 2 statefulset some-name-cfg + local generation=2 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f3Zok4doxH +++ mktemp ++ local LAST_ERR=/tmp/tmp.E3CGb5KZI2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f3Zok4doxH ++ cat /tmp/tmp.E3CGb5KZI2 ++ rm /tmp/tmp.f3Zok4doxH /tmp/tmp.E3CGb5KZI2 ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset some-name-rs0 + local generation=2 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PxCnbA5bxD +++ mktemp ++ local LAST_ERR=/tmp/tmp.F3MZkxTcZn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PxCnbA5bxD ++ cat /tmp/tmp.F3MZkxTcZn ++ rm /tmp/tmp.PxCnbA5bxD 
/tmp/tmp.F3MZkxTcZn ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + compare_generation 2 statefulset some-name-mongos + local generation=2 + local resource_type=statefulset + local resource_name=some-name-mongos + local current_generation ++ kubectl_bin get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8xPmodGgN6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2AkDNHxg6m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8xPmodGgN6 ++ cat /tmp/tmp.2AkDNHxg6m ++ rm /tmp/tmp.8xPmodGgN6 /tmp/tmp.2AkDNHxg6m ++ return 0 + current_generation=2 + [[ 2 != \2 ]] + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.IvCe8VVSam ++ mktemp + local LAST_ERR=/tmp/tmp.nkXENhQDIJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IvCe8VVSam perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.nkXENhQDIJ + rm /tmp/tmp.IvCe8VVSam /tmp/tmp.nkXENhQDIJ + return 0 + sleep 10 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Je72N9sova +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q2K7FSR9vT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Je72N9sova ++ cat /tmp/tmp.Q2K7FSR9vT ++ rm /tmp/tmp.Je72N9sova /tmp/tmp.Q2K7FSR9vT ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ run_mongos 'JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692 ++ local 'command=JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692 ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ grep -E '^\{.*\}$' ++ jq -r .featureCompatibilityVersion.version +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pA635bgmnn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.g7KsDj74ln +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pA635bgmnn +++ cat /tmp/tmp.g7KsDj74ln +++ rm /tmp/tmp.pA635bgmnn /tmp/tmp.g7KsDj74ln +++ return 0 ++ local client_container=psmdb-client-7469665986-9222t ++ local mongo_flag= ++ kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo 
mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7RuSwYwxr4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.q4FSR9sg3w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7RuSwYwxr4 ++ cat /tmp/tmp.q4FSR9sg3w ++ rm /tmp/tmp.7RuSwYwxr4 /tmp/tmp.q4FSR9sg3w ++ return 0 + currentFCV=6.0 + [[ 6.0 != 6.0 ]] + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RcUrZIZUDI +++ mktemp ++ local LAST_ERR=/tmp/tmp.kzFd2aztG1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RcUrZIZUDI ++ cat /tmp/tmp.kzFd2aztG1 ++ rm /tmp/tmp.RcUrZIZUDI /tmp/tmp.kzFd2aztG1 ++ return 0 + local client_container=psmdb-client-7469665986-9222t + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.IQdNPdexZD ++ mktemp + local LAST_ERR=/tmp/tmp.uMZReYN15X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IQdNPdexZD Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("13ed6de5-116c-4330-8702-5267d3902a0e") } Percona Server for MongoDB server version: v6.0.15-12 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.uMZReYN15X + rm /tmp/tmp.IQdNPdexZD /tmp/tmp.uMZReYN15X + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 -2 + local command=find + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local postfix=-2 + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 mongodb '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local driver=mongodb + egrep -v 'I NETWORK|W NETWORK|Error 
saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xZYz5UMSKA +++ mktemp ++ local LAST_ERR=/tmp/tmp.JhE7NneWlW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xZYz5UMSKA ++ cat /tmp/tmp.JhE7NneWlW ++ rm /tmp/tmp.xZYz5UMSKA /tmp/tmp.JhE7NneWlW ++ return 0 + local client_container=psmdb-client-7469665986-9222t + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.QgbhFdY1Zc ++ mktemp + local LAST_ERR=/tmp/tmp.PueUKgoXWw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QgbhFdY1Zc + cat /tmp/tmp.PueUKgoXWw + rm /tmp/tmp.QgbhFdY1Zc /tmp/tmp.PueUKgoXWw + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/compare/find-2.json /tmp/tmp.JZLX6ng15v/find-2 + target_generation=3 + for version in '${versions_to_verify[@]}' + desc 'Testing upgrade to version: 7.0' + set +o xtrace ----------------------------------------------------------------------------------- Testing upgrade to version: 7.0 ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/apply","value": "7.0-recommended"}, ]' ++ mktemp + local LAST_OUT=/tmp/tmp.vMeIQrj0yD ++ mktemp + local LAST_ERR=/tmp/tmp.wJAI9Ejj3J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/apply","value": "7.0-recommended"}, ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vMeIQrj0yD perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.wJAI9Ejj3J + rm /tmp/tmp.vMeIQrj0yD /tmp/tmp.wJAI9Ejj3J + return 0 + sleep 70 + wait_for_running some-name-cfg 3 + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.KRDpX1y7eR +++ mktemp ++ local LAST_ERR=/tmp/tmp.WIMDelZyt1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KRDpX1y7eR ++ cat /tmp/tmp.WIMDelZyt1 ++ rm /tmp/tmp.KRDpX1y7eR /tmp/tmp.WIMDelZyt1 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BDfljlyz6B +++ mktemp ++ local LAST_ERR=/tmp/tmp.qmNZI3Z8ng ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BDfljlyz6B ++ cat /tmp/tmp.qmNZI3Z8ng ++ rm /tmp/tmp.BDfljlyz6B /tmp/tmp.qmNZI3Z8ng ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.............................................................................. + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Dow34N7eSF +++ mktemp ++ local LAST_ERR=/tmp/tmp.NgMNDsCGeW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Dow34N7eSF ++ cat /tmp/tmp.NgMNDsCGeW ++ rm /tmp/tmp.Dow34N7eSF /tmp/tmp.NgMNDsCGeW ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X6efw20RPD +++ mktemp ++ local LAST_ERR=/tmp/tmp.8ad5r7vy27 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X6efw20RPD ++ cat /tmp/tmp.8ad5r7vy27 ++ rm /tmp/tmp.X6efw20RPD /tmp/tmp.8ad5r7vy27 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 
-eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XNlCLJY5mW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Lvt3xbQzbp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XNlCLJY5mW ++ cat /tmp/tmp.Lvt3xbQzbp ++ rm /tmp/tmp.XNlCLJY5mW /tmp/tmp.Lvt3xbQzbp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6cmows9cOY +++ mktemp ++ local LAST_ERR=/tmp/tmp.GnxRp9VnRX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6cmows9cOY ++ cat /tmp/tmp.GnxRp9VnRX ++ rm /tmp/tmp.6cmows9cOY /tmp/tmp.GnxRp9VnRX ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VLNip0llrX +++ mktemp ++ local LAST_ERR=/tmp/tmp.BDXjRS0UiW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VLNip0llrX ++ cat /tmp/tmp.BDXjRS0UiW ++ rm /tmp/tmp.VLNip0llrX /tmp/tmp.BDXjRS0UiW ++ return 0 + [[ ready == \r\e\a\d\y ]] + compare_generation 3 statefulset some-name-cfg + local generation=3 + local resource_type=statefulset + local resource_name=some-name-cfg + local current_generation ++ kubectl_bin get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MXLSkZj7Ar +++ mktemp ++ local LAST_ERR=/tmp/tmp.40s0szgv9X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset some-name-cfg -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MXLSkZj7Ar ++ cat /tmp/tmp.40s0szgv9X ++ rm /tmp/tmp.MXLSkZj7Ar /tmp/tmp.40s0szgv9X ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + compare_generation 3 statefulset some-name-rs0 + local generation=3 + local resource_type=statefulset + local resource_name=some-name-rs0 + local current_generation ++ kubectl_bin get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.308vOfMhYE +++ mktemp ++ local LAST_ERR=/tmp/tmp.VR44JdnCgi ++ local exit_status=0 ++ local 
timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset some-name-rs0 -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.308vOfMhYE ++ cat /tmp/tmp.VR44JdnCgi ++ rm /tmp/tmp.308vOfMhYE /tmp/tmp.VR44JdnCgi ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + compare_generation 3 statefulset some-name-mongos + local generation=3 + local resource_type=statefulset + local resource_name=some-name-mongos + local current_generation ++ kubectl_bin get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.quf8GFjSOw +++ mktemp ++ local LAST_ERR=/tmp/tmp.JhX14OxJt3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get statefulset some-name-mongos -o 'jsonpath={.metadata.generation}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.quf8GFjSOw ++ cat /tmp/tmp.JhX14OxJt3 ++ rm /tmp/tmp.quf8GFjSOw /tmp/tmp.JhX14OxJt3 ++ return 0 + current_generation=3 + [[ 3 != \3 ]] + kubectl_bin patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.9wyDkH3Egn ++ mktemp + local LAST_ERR=/tmp/tmp.fCD6MMPBD6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb/some-name --type=json '-p=[ {"op":"replace","path":"/spec/upgradeOptions/setFCV","value": true} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9wyDkH3Egn perconaservermongodb.psmdb.percona.com/some-name patched (no change) + cat /tmp/tmp.fCD6MMPBD6 + rm /tmp/tmp.9wyDkH3Egn /tmp/tmp.fCD6MMPBD6 + return 0 + sleep 10 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H0ytWawdpx +++ mktemp ++ local LAST_ERR=/tmp/tmp.SRBze6u6zV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H0ytWawdpx ++ cat /tmp/tmp.SRBze6u6zV ++ rm /tmp/tmp.H0ytWawdpx /tmp/tmp.SRBze6u6zV ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ run_mongos 'JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692 ++ local 'command=JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692 ++ grep -E '^\{.*\}$' ++ local driver=mongodb ++ local suffix=.svc.cluster.local ++ jq -r .featureCompatibilityVersion.version +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9bFSLttVqI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CsgRKCxYYw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9bFSLttVqI +++ cat /tmp/tmp.CsgRKCxYYw +++ rm /tmp/tmp.9bFSLttVqI /tmp/tmp.CsgRKCxYYw +++ return 0 ++ local 
client_container=psmdb-client-7469665986-9222t ++ local mongo_flag= ++ kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nZYt0ZhA5d +++ mktemp ++ local LAST_ERR=/tmp/tmp.d9cjw27TIt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nZYt0ZhA5d ++ cat /tmp/tmp.d9cjw27TIt ++ rm /tmp/tmp.nZYt0ZhA5d /tmp/tmp.d9cjw27TIt ++ return 0 + currentFCV=7.0 + [[ 7.0 != 7.0 ]] + run_mongos 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.au1jGGotNK +++ mktemp ++ local LAST_ERR=/tmp/tmp.uW8HvR9SvK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.au1jGGotNK ++ cat /tmp/tmp.uW8HvR9SvK ++ rm /tmp/tmp.au1jGGotNK /tmp/tmp.uW8HvR9SvK ++ return 0 + local client_container=psmdb-client-7469665986-9222t + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.G0CYIQfROk ++ mktemp + local LAST_ERR=/tmp/tmp.7vGB78KLI4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G0CYIQfROk Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("ccfb1ab8-6b54-4fc2-83d7-b70605a66005") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.7vGB78KLI4 + rm /tmp/tmp.G0CYIQfROk /tmp/tmp.7vGB78KLI4 + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 -3 + local command=find + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local postfix=-3 + local suffix= + local database=myApp + local collection=test + run_mongos 'use myApp\n db.test.find()' 
myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 mongodb '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' + local uri=myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692 + local driver=mongodb + local suffix=.svc.cluster.local + /usr/bin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3dvO6UiMuq +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZJzm1RM9O2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3dvO6UiMuq ++ cat /tmp/tmp.ZJzm1RM9O2 ++ rm /tmp/tmp.3dvO6UiMuq /tmp/tmp.ZJzm1RM9O2 ++ return 0 + local client_container=psmdb-client-7469665986-9222t + local mongo_flag= + kubectl_bin exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.4wuDqlqqRe ++ mktemp + local LAST_ERR=/tmp/tmp.MfjDywJQ3I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-7469665986-9222t -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4wuDqlqqRe + cat /tmp/tmp.MfjDywJQ3I + rm /tmp/tmp.4wuDqlqqRe /tmp/tmp.MfjDywJQ3I + return 0 + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/e2e-tests/mongod-major-upgrade-sharded/compare/find-3.json /tmp/tmp.JZLX6ng15v/find-3 + target_generation=4 + destroy mongod-major-upgrade-sharded-18692 + local namespace=mongod-major-upgrade-sharded-18692 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.ogxw0c6fE9 ++ mktemp + local LAST_ERR=/tmp/tmp.t6zrCiDt5d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ogxw0c6fE9 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io 
"perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.t6zrCiDt5d + rm /tmp/tmp.ogxw0c6fE9 /tmp/tmp.t6zrCiDt5d + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Xi5gjrik68 ++ mktemp + local LAST_ERR=/tmp/tmp.aGqUrQjTvr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Xi5gjrik68 + cat /tmp/tmp.aGqUrQjTvr + rm /tmp/tmp.Xi5gjrik68 /tmp/tmp.aGqUrQjTvr + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.C8ZlwZRRMY ++ mktemp + local LAST_ERR=/tmp/tmp.XsRfJ3UjT0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C8ZlwZRRMY + cat /tmp/tmp.XsRfJ3UjT0 + rm /tmp/tmp.C8ZlwZRRMY /tmp/tmp.XsRfJ3UjT0 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.MXSe7bbOg8 ++ mktemp + local LAST_ERR=/tmp/tmp.CJC9TgUTqH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MXSe7bbOg8 + cat 
/tmp/tmp.CJC9TgUTqH + rm /tmp/tmp.MXSe7bbOg8 /tmp/tmp.CJC9TgUTqH + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.neGlneqI5a ++ mktemp + local LAST_ERR=/tmp/tmp.tPAtoX9G9y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1568/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.neGlneqI5a clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.tPAtoX9G9y + rm /tmp/tmp.neGlneqI5a /tmp/tmp.tPAtoX9G9y + return 0 + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + : + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace mongod-major-upgrade-sharded-18692 + rm -rf /tmp/tmp.JZLX6ng15v + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.CTPIZ2iS2P + local LAST_OUT=/tmp/tmp.labfuWQu9s ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.NxVnvOGn9A + local exit_status=0 + local LAST_ERR=/tmp/tmp.huJFF8XYbc + local timeout=4 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace mongod-major-upgrade-sharded-18692 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
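Condensed, the major-version step this log exercises twice (5.0 to 6.0, then 6.0 to 7.0) is: patch the CR's upgradeOptions.apply, wait for the rollout, flip setFCV, and confirm the feature compatibility version. A sketch assembled from the commands traced above, with the waits between steps elided; the mongo invocation runs inside the psmdb-client pod in the trace, via kubectl exec:

    kubectl patch psmdb/some-name --type=json \
        -p='[{"op":"replace","path":"/spec/upgradeOptions/apply","value":"7.0-recommended"}]'
    # wait until each statefulset reaches its next metadata.generation and the
    # CR reports .status.state == ready, then let the operator raise the FCV:
    kubectl patch psmdb/some-name --type=json \
        -p='[{"op":"replace","path":"/spec/upgradeOptions/setFCV","value":true}]'
    # confirm the featureCompatibilityVersion on the cfg replica set:
    echo 'JSON.stringify(db.adminCommand({getParameter:1,featureCompatibilityVersion:1}))' \
        | mongo 'mongodb://clusterAdmin:clusterAdmin123456@some-name-cfg.mongod-major-upgrade-sharded-18692.svc.cluster.local/admin' \
        | grep -E '^\{.*\}$' | jq -r '.featureCompatibilityVersion.version'
    # expected output after the final upgrade: 7.0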