Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/logs/version-service.log
grep: warning: stray \ before -
Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.34) and server (1.30) exceeds the supported minor version skew of +/-1
+ create_infra version-service-24327
+ local ns=version-service-24327
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.3tGy6VXXJZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.Rrcf9GaPno
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.3tGy6VXXJZ
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.Rrcf9GaPno
+ rm /tmp/tmp.3tGy6VXXJZ /tmp/tmp.Rrcf9GaPno
+ return 0
++ grep -v '\-\-\-'
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/crd.yaml
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.1ogZWbo89r
++ mktemp
+ local LAST_ERR=/tmp/tmp.xYDBXwMw8Z
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.1ogZWbo89r
+ cat /tmp/tmp.xYDBXwMw8Z
+ rm /tmp/tmp.1ogZWbo89r /tmp/tmp.xYDBXwMw8Z
+ return 0
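Every kubectl_bin trace above (mktemp, LAST_OUT/LAST_ERR, seq 0 2, set +e/-e, break) expands from one retry wrapper in the test harness. A minimal sketch of that pattern, reconstructed from the trace alone; the function body here is an assumption, not the verbatim helper from e2e-tests:

# Sketch of the kubectl_bin retry wrapper seen throughout this log.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        [ "$exit_status" == 0 ] && break
        sleep $((timeout * i))   # matches the sleep 0 / sleep 4 / sleep 8 retries below
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}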
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.lixednFl98 ++ mktemp + local LAST_ERR=/tmp/tmp.nOK3E560dJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lixednFl98 + cat /tmp/tmp.nOK3E560dJ + rm /tmp/tmp.lixednFl98 /tmp/tmp.nOK3E560dJ + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ZzoAzTwLSU ++ mktemp + local LAST_ERR=/tmp/tmp.W4y2t0PgXU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZzoAzTwLSU + cat /tmp/tmp.W4y2t0PgXU + rm /tmp/tmp.ZzoAzTwLSU /tmp/tmp.W4y2t0PgXU + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.UN9QHgCXfU ++ mktemp + local LAST_ERR=/tmp/tmp.OhShtTaRbn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UN9QHgCXfU clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.OhShtTaRbn + rm /tmp/tmp.UN9QHgCXfU /tmp/tmp.OhShtTaRbn + return 0 + check_crd_for_deletion PR-2045-4cbffc79 + local git_tag=PR-2045-4cbffc79 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2045-4cbffc79/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sh70cFNRa0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pIahwxhFa2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.sh70cFNRa0 ++ cat /tmp/tmp.pIahwxhFa2 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ 
+ check_crd_for_deletion PR-2045-4cbffc79
+ local git_tag=PR-2045-4cbffc79
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2045-4cbffc79/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/sbin/sed s/---//g
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g')
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.sh70cFNRa0
+++ mktemp
++ local LAST_ERR=/tmp/tmp.pIahwxhFa2
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.sh70cFNRa0
++ cat /tmp/tmp.pIahwxhFa2
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.sh70cFNRa0
++ cat /tmp/tmp.pIahwxhFa2
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.sh70cFNRa0
++ cat /tmp/tmp.pIahwxhFa2
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.sh70cFNRa0
++ cat /tmp/tmp.pIahwxhFa2
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.sh70cFNRa0 /tmp/tmp.pIahwxhFa2
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
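The chaos-mesh teardown above is a best-effort sweep: every delete is fed from a grep that may match nothing, capped with timeout, and the resulting "no name was specified" error is discarded with ':'. A hypothetical generalized form of the pattern (cleanup_kind is not a helper in the repo, just an illustration):

cleanup_kind() {
    local kind=$1 pattern=$2
    # grep may emit no names; kubectl then errors and '|| :' ignores it
    timeout 30 kubectl delete "$kind" \
        $(kubectl get "$kind" | grep "$pattern" | awk '{print $1}') || :
}
cleanup_kind MutatingWebhookConfiguration chaos-mesh
cleanup_kind clusterrolebinding chaos-mesh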
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ awk '{print$1}'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ xargs kubectl delete ns
++ mktemp
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.FwfWoSQhtR
egrep: warning: egrep is obsolescent; using grep -E
++ mktemp
+ local LAST_OUT=/tmp/tmp.azkbQhtkD3
++ mktemp
+ local LAST_ERR=/tmp/tmp.VvghHN2ey3
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.kosATl3ptn
+ local exit_status=0
+ local timeout=4
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.FwfWoSQhtR
+ cat /tmp/tmp.VvghHN2ey3
+ rm /tmp/tmp.FwfWoSQhtR /tmp/tmp.VvghHN2ey3
+ return 0
namespace "version-service-801" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.azkbQhtkD3
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.kosATl3ptn
+ rm /tmp/tmp.azkbQhtkD3 /tmp/tmp.kosATl3ptn
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.68Lfh6gSR1
++ mktemp
+ local LAST_ERR=/tmp/tmp.x336158Sru
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.68Lfh6gSR1
+ cat /tmp/tmp.x336158Sru
+ rm /tmp/tmp.68Lfh6gSR1 /tmp/tmp.x336158Sru
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.YnFTdayd8z
++ mktemp
+ local LAST_ERR=/tmp/tmp.BpUl3Ahx5A
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YnFTdayd8z
namespace/psmdb-operator created
+ cat /tmp/tmp.BpUl3Ahx5A
+ rm /tmp/tmp.YnFTdayd8z /tmp/tmp.BpUl3Ahx5A
+ return 0
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.kIjRRsxwsG
+++ mktemp
++ local LAST_ERR=/tmp/tmp.dXRMw15U30
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.kIjRRsxwsG
++ cat /tmp/tmp.dXRMw15U30
++ rm /tmp/tmp.kIjRRsxwsG /tmp/tmp.dXRMw15U30
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2045-4cbffc79-4-cluster4 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.EVMGyKUttm
++ mktemp
+ local LAST_ERR=/tmp/tmp.YuaQvlLXzM
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2045-4cbffc79-4-cluster4 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EVMGyKUttm
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2045-4cbffc79-4-cluster4" modified.
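The namespace recreation pattern used here (and again later for the test namespace) condenses to four commands; this sketch drops the retry wrapper and reads the context name back from kubeconfig, as set_kube_ctx does above:

kubectl delete namespace psmdb-operator --ignore-not-found
kubectl wait --for=delete namespace psmdb-operator
kubectl create namespace psmdb-operator
kubectl config set-context "$(kubectl config current-context)" --namespace=psmdb-operator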
+ cat /tmp/tmp.YuaQvlLXzM
+ rm /tmp/tmp.EVMGyKUttm /tmp/tmp.YuaQvlLXzM
+ return 0
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.61qZfBsliB
++ mktemp
+ local LAST_ERR=/tmp/tmp.uI15wP7Ab2
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.61qZfBsliB
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.uI15wP7Ab2
+ rm /tmp/tmp.61qZfBsliB /tmp/tmp.uI15wP7Ab2
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.0hpmuQL5XA
++ mktemp
+ local LAST_ERR=/tmp/tmp.E1xSHz01su
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0hpmuQL5XA
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.E1xSHz01su
+ rm /tmp/tmp.0hpmuQL5XA /tmp/tmp.E1xSHz01su
+ return 0
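deploy_operator first force-applies the CRDs server-side (so a re-run wins any field-manager conflicts), then rewrites the namespace in cw-rbac.yaml on the fly. A condensed sketch of the two steps shown above, with sed reading the file directly instead of via cat:

kubectl apply --server-side --force-conflicts -f deploy/crd.yaml
sed -e 's^namespace: .*^namespace: psmdb-operator^' deploy/cw-rbac.yaml \
    | kubectl apply -n psmdb-operator -f -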
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.HXuyiRewlq ++ mktemp + local LAST_ERR=/tmp/tmp.O1LJII3PyP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HXuyiRewlq deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.O1LJII3PyP + rm /tmp/tmp.HXuyiRewlq /tmp/tmp.O1LJII3PyP + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.9NanMIs0c2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UdGK7QeL8J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9NanMIs0c2 ++ cat /tmp/tmp.UdGK7QeL8J ++ rm /tmp/tmp.9NanMIs0c2 /tmp/tmp.UdGK7QeL8J ++ return 0 + wait_pod percona-server-mongodb-operator-787f9b79bc-2wt86 + local pod=percona-server-mongodb-operator-787f9b79bc-2wt86 + set +o xtrace waiting for pod/percona-server-mongodb-operator-787f9b79bc-2wt86 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.NLyyreALO8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fdi1mCbEnK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NLyyreALO8 ++ cat /tmp/tmp.Fdi1mCbEnK ++ rm /tmp/tmp.NLyyreALO8 /tmp/tmp.Fdi1mCbEnK ++ return 0 + kubectl_bin logs percona-server-mongodb-operator-787f9b79bc-2wt86 ++ mktemp + local LAST_OUT=/tmp/tmp.Zew1zqkiR3 ++ mktemp + local LAST_ERR=/tmp/tmp.u5adAJiZEv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs percona-server-mongodb-operator-787f9b79bc-2wt86 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Zew1zqkiR3 + cat /tmp/tmp.u5adAJiZEv + rm /tmp/tmp.Zew1zqkiR3 /tmp/tmp.u5adAJiZEv + return 0 2025-09-16T15:38:15.302Z INFO setup Manager starting up {"gitCommit": "4cbffc79dc7467dd090ba165bdb1d88c4552480a", "gitBranch": "PR-2045-4cbffc79", "buildTime": "", "goVersion": "go1.25.1", "os": "linux", "arch": "amd64"} + create_namespace version-service-24327 + local namespace=version-service-24327 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: 
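The operator Deployment is rendered from deploy/cw-operator.yaml with yq (v4) overrides for the PR image and env vars before being applied; this is the expression traced above, reflowed:

yq eval '
    (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79")
  | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")
  | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")
' deploy/cw-operator.yaml | kubectl apply -f -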
+ create_namespace version-service-24327
+ local namespace=version-service-24327
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces version-service-24327'
+ set +o xtrace
++ mktemp
+ xargs kubectl delete ns
-----------------------------------------------------------------------------------
cleaned up old namespaces version-service-24327
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace version-service-24327 --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.YKa4Je6YHd
egrep: warning: egrep is obsolescent; using grep -E
++ mktemp
+ local LAST_OUT=/tmp/tmp.PHkQrHcNfW
++ mktemp
+ local LAST_ERR=/tmp/tmp.aaeMW4UFcH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.9YwCZ1BS9f
+ local exit_status=0
+ local timeout=4
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace version-service-24327 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.YKa4Je6YHd
+ cat /tmp/tmp.aaeMW4UFcH
+ rm /tmp/tmp.YKa4Je6YHd /tmp/tmp.aaeMW4UFcH
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.PHkQrHcNfW
+ cat /tmp/tmp.9YwCZ1BS9f
+ rm /tmp/tmp.PHkQrHcNfW /tmp/tmp.9YwCZ1BS9f
+ return 0
+ kubectl_bin wait --for=delete namespace version-service-24327
++ mktemp
+ local LAST_OUT=/tmp/tmp.r6dklZUqYj
++ mktemp
+ local LAST_ERR=/tmp/tmp.WS2RHxZqUJ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace version-service-24327
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.r6dklZUqYj
+ cat /tmp/tmp.WS2RHxZqUJ
+ rm /tmp/tmp.r6dklZUqYj /tmp/tmp.WS2RHxZqUJ
+ return 0
+ desc 'create namespace version-service-24327'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace version-service-24327
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace version-service-24327
++ mktemp
+ local LAST_OUT=/tmp/tmp.9977xNL9Ir
++ mktemp
+ local LAST_ERR=/tmp/tmp.9XJ0HJ96tM
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace version-service-24327
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.9977xNL9Ir
namespace/version-service-24327 created
+ cat /tmp/tmp.9XJ0HJ96tM
+ rm /tmp/tmp.9977xNL9Ir /tmp/tmp.9XJ0HJ96tM
+ return 0
+ set_kube_ctx version-service-24327
+ local namespace=version-service-24327
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.RPh13sqHiu
+++ mktemp
++ local LAST_ERR=/tmp/tmp.1n0j5IUBpo
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.RPh13sqHiu
++ cat /tmp/tmp.1n0j5IUBpo
++ rm /tmp/tmp.RPh13sqHiu /tmp/tmp.1n0j5IUBpo
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2045-4cbffc79-4-cluster4 --namespace=version-service-24327
++ mktemp
+ local LAST_OUT=/tmp/tmp.smw7wZHrcl
++ mktemp
+ local LAST_ERR=/tmp/tmp.SyYk5g2k4a
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2045-4cbffc79-4-cluster4 --namespace=version-service-24327
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.smw7wZHrcl
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2045-4cbffc79-4-cluster4" modified.
+ cat /tmp/tmp.SyYk5g2k4a
+ rm /tmp/tmp.smw7wZHrcl /tmp/tmp.SyYk5g2k4a
+ return 0
+ desc 'install version service'
+ set +o xtrace
-----------------------------------------------------------------------------------
install version service
-----------------------------------------------------------------------------------
+ kubectl_bin create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json
++ mktemp
+ local LAST_OUT=/tmp/tmp.ryjEE2vd9B
++ mktemp
+ local LAST_ERR=/tmp/tmp.WGSh3mIbmt
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ryjEE2vd9B
configmap/versions created
+ cat /tmp/tmp.WGSh3mIbmt
+ rm /tmp/tmp.ryjEE2vd9B /tmp/tmp.WGSh3mIbmt
+ return 0
+ kubectl_bin apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/vs.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.Lb0hGR8QkQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.v2N6xdlsQu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/vs.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Lb0hGR8QkQ
deployment.apps/version-service created
service/version-service created
+ cat /tmp/tmp.v2N6xdlsQu
+ rm /tmp/tmp.Lb0hGR8QkQ /tmp/tmp.v2N6xdlsQu
+ return 0
+ sleep 10
+ yq eval '(.. | select(tag == "!!str")) |= sub("version-service$", "version-service-cr")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/vs.yml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.9OphmzXoof
++ mktemp
+ local LAST_ERR=/tmp/tmp.tRCbJg2xKR
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.9OphmzXoof
deployment.apps/version-service-cr created
service/version-service-cr created
+ cat /tmp/tmp.tRCbJg2xKR
+ rm /tmp/tmp.9OphmzXoof /tmp/tmp.tRCbJg2xKR
+ return 0
+ kubectl_bin -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000
++ mktemp
+ local LAST_OUT=/tmp/tmp.BkE3IWOqXk
++ mktemp
+ local LAST_ERR=/tmp/tmp.TvtNYVRKT0
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.BkE3IWOqXk
deployment.apps/percona-server-mongodb-operator env updated
+ cat /tmp/tmp.TvtNYVRKT0
+ rm /tmp/tmp.BkE3IWOqXk /tmp/tmp.TvtNYVRKT0
+ return 0
+ sleep 30
+ desc 'enable telemetry on operator level'
+ set +o xtrace
-----------------------------------------------------------------------------------
enable telemetry on operator level
-----------------------------------------------------------------------------------
+ kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
+ yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "false"'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.e9pmqIgYT1
++ mktemp
+ local LAST_OUT=/tmp/tmp.wy4mY1gL3D
++ mktemp
+ local LAST_ERR=/tmp/tmp.ngSkHY4DdG
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
+ local LAST_ERR=/tmp/tmp.ZLKhFV0IKi
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.e9pmqIgYT1
+ cat /tmp/tmp.ngSkHY4DdG
+ rm /tmp/tmp.e9pmqIgYT1 /tmp/tmp.ngSkHY4DdG
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wy4mY1gL3D
deployment.apps/percona-server-mongodb-operator configured
+ cat /tmp/tmp.ZLKhFV0IKi
+ rm /tmp/tmp.wy4mY1gL3D /tmp/tmp.ZLKhFV0IKi
+ return 0
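Telemetry is flipped at the operator level by round-tripping the live Deployment through yq and re-applying it; the trace above sets DISABLE_TELEMETRY=false (telemetry enabled), and the later "disabling" step is the same edit with "true". Condensed:

kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator \
    | yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "false"' \
    | kubectl apply -n psmdb-operator -f -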
+ wait_deployment percona-server-mongodb-operator
+ local name=percona-server-mongodb-operator
+ sleep 10
+ retry=0
+ echo -n percona-server-mongodb-operator
percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.OoXINW4EAt
++ mktemp
+ local LAST_ERR=/tmp/tmp.4rURHJdsQu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get deployment percona-server-mongodb-operator -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.OoXINW4EAt
+ cat /tmp/tmp.4rURHJdsQu
+ rm /tmp/tmp.OoXINW4EAt /tmp/tmp.4rURHJdsQu
+ return 0
++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.JMmUQwg0j1
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bo8sX16Y4N
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.JMmUQwg0j1
++ cat /tmp/tmp.bo8sX16Y4N
++ rm /tmp/tmp.JMmUQwg0j1 /tmp/tmp.bo8sX16Y4N
++ return 0
++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.gGLNuUoRIc
+++ mktemp
++ local LAST_ERR=/tmp/tmp.rOi7hhldeC
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.gGLNuUoRIc
++ cat /tmp/tmp.rOi7hhldeC
++ rm /tmp/tmp.gGLNuUoRIc /tmp/tmp.rOi7hhldeC
++ return 0
+ '[' 1 == 1 ']'
+ check_telemetry_transfer http://version-service-cr:11000 disabled enabled
+ local cr_vs_uri=http://version-service-cr:11000
+ local cr_vs_channel=disabled
+ local telemetry_state=enabled
+ cluster=minimal-cluster
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.KAWdnaQKkV
++ mktemp
+ local LAST_ERR=/tmp/tmp.eJdtCZQFCH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.KAWdnaQKkV
deployment.apps/psmdb-client created
+ cat /tmp/tmp.eJdtCZQFCH
+ rm /tmp/tmp.KAWdnaQKkV /tmp/tmp.eJdtCZQFCH
+ return 0
+ yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.Zm3yjmOZwP
++ mktemp
+ local LAST_ERR=/tmp/tmp.n5QbWadd6f
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Zm3yjmOZwP
secret/minimal-cluster created
+ cat /tmp/tmp.n5QbWadd6f
+ rm /tmp/tmp.Zm3yjmOZwP /tmp/tmp.n5QbWadd6f
+ return 0
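wait_deployment's readiness probe above boils down to comparing the desired and ready replica counts from the Deployment status; a sketch of one probe iteration (the '[' 1 == 1 ']' in the trace is this comparison after expansion):

replicas=$(kubectl get deployment percona-server-mongodb-operator \
    -o 'jsonpath={.status.replicas}' -n psmdb-operator)
ready=$(kubectl get deployment percona-server-mongodb-operator \
    -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator)
[ "$replicas" == "$ready" ] || echo "still rolling out"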
+ desc 'create PSMDB minimal cluster minimal-cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
create PSMDB minimal cluster minimal-cluster
-----------------------------------------------------------------------------------
+ yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "disabled" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cr-minimal.yaml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.iSnGE2OsDK
++ mktemp
+ local LAST_ERR=/tmp/tmp.sood3mVMos
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.iSnGE2OsDK
perconaservermongodb.psmdb.percona.com/minimal-cluster created
+ cat /tmp/tmp.sood3mVMos
+ rm /tmp/tmp.iSnGE2OsDK /tmp/tmp.sood3mVMos
+ return 0
+ desc 'check if Pod is started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if Pod is started
-----------------------------------------------------------------------------------
+ wait_for_running minimal-cluster-rs0 1
+ local name=minimal-cluster-rs0
+ let last_pod=0
+ :
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=minimal-cluster
++ seq 0 0
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 0 ]]
++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Rpj85MVgGH
+++ mktemp
++ local LAST_ERR=/tmp/tmp.UbaVVjKr9c
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Rpj85MVgGH
++ cat /tmp/tmp.UbaVVjKr9c
++ rm /tmp/tmp.Rpj85MVgGH /tmp/tmp.UbaVVjKr9c
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod minimal-cluster-rs0-0
+ local pod=minimal-cluster-rs0-0
+ set +o xtrace
waiting for pod/minimal-cluster-rs0-0 to be ready..................OK
++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ONkX0w1Coa
+++ mktemp
++ local LAST_ERR=/tmp/tmp.PUpk7CTjvs
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ONkX0w1Coa
++ cat /tmp/tmp.PUpk7CTjvs
++ rm /tmp/tmp.ONkX0w1Coa /tmp/tmp.PUpk7CTjvs
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.guw2krkCE2
+++ mktemp
++ local LAST_ERR=/tmp/tmp.KCbASXRFTU
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.guw2krkCE2
++ cat /tmp/tmp.KCbASXRFTU
++ rm /tmp/tmp.guw2krkCE2 /tmp/tmp.KCbASXRFTU
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness...............
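The minimal CR is customized per run with yq before it is applied; the version-service endpoint and the "disabled" upgrade channel are the variables under test here. An abridged form of the expression traced above:

yq eval '
    .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000"
  | .spec.upgradeOptions.apply = "disabled"
  | .spec.crVersion = "9.9.9"
  | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"
  | .spec.backup.enabled = false
' deploy/cr-minimal.yaml | kubectl apply -f -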
+ sleep 20
+ desc 'create user myApp'
+ set +o xtrace
-----------------------------------------------------------------------------------
create user myApp
-----------------------------------------------------------------------------------
+ run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327
+ local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})'
+ local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327
+ local driver=mongodb+srv
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.51GpDxitnO
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ydu3QL2m6b
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.51GpDxitnO
++ cat /tmp/tmp.ydu3QL2m6b
++ rm /tmp/tmp.51GpDxitnO /tmp/tmp.ydu3QL2m6b
++ return 0
+ local client_container=psmdb-client-66f577db5f-z24q6
+ local mongo_flag=
+ [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-66f577db5f-z24q6 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
++ mktemp
+ local LAST_OUT=/tmp/tmp.yARWw7CVIO
++ mktemp
+ local LAST_ERR=/tmp/tmp.t2RnwzgYg8
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-66f577db5f-z24q6 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.yARWw7CVIO
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("ed21edb0-f213-4cf4-b283-3de0939dbb14") }
Percona Server for MongoDB server version: v7.0.24-13
WARNING: shell and server versions do not match
Successfully added user: {
	"user" : "myApp",
	"roles" : [
		{
			"db" : "myApp",
			"role" : "readWrite"
		}
	]
}
bye
+ cat /tmp/tmp.t2RnwzgYg8
+ rm /tmp/tmp.yARWw7CVIO /tmp/tmp.t2RnwzgYg8
+ return 0
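run_mongo pipes a script into the legacy mongo shell inside the client pod, building an SRV URI from the replset service. A simplified sketch with the quoting flattened (the pod lookup mirrors the trace; the rest is an assumption about the helper's shape):

client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
uri='mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false&replicaSet=rs0'
kubectl exec "$client" -- bash -c "printf '%s\n' 'db.createUser({user: \"myApp\", pwd: \"myPass\", roles: [{ db: \"myApp\", role: \"readWrite\" }]})' | mongo '$uri'"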
+ desc 'write data, read from all'
+ set +o xtrace
-----------------------------------------------------------------------------------
write data, read from all
-----------------------------------------------------------------------------------
+ run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-24327
+ local 'command=use myApp\n db.test.insert({ x: 100500 })'
+ local uri=myApp:myPass@minimal-cluster-rs0.version-service-24327
+ local driver=mongodb+srv
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.JudUexprbv
+++ mktemp
++ local LAST_ERR=/tmp/tmp.N2XEa3ZgQa
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.JudUexprbv
++ cat /tmp/tmp.N2XEa3ZgQa
++ rm /tmp/tmp.JudUexprbv /tmp/tmp.N2XEa3ZgQa
++ return 0
+ local client_container=psmdb-client-66f577db5f-z24q6
+ local mongo_flag=
+ [[ myApp:myPass@minimal-cluster-rs0.version-service-24327 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-66f577db5f-z24q6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
++ mktemp
+ local LAST_OUT=/tmp/tmp.NcN1oQQuog
++ mktemp
+ local LAST_ERR=/tmp/tmp.zxBoMbkQy9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-66f577db5f-z24q6 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.NcN1oQQuog
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("15731b08-8b62-45bf-b4af-8d130d04ecdb") }
Percona Server for MongoDB server version: v7.0.24-13
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.zxBoMbkQy9
+ rm /tmp/tmp.NcN1oQQuog /tmp/tmp.zxBoMbkQy9
+ return 0
+ desc 'check telemetry'
+ set +o xtrace
-----------------------------------------------------------------------------------
check telemetry
-----------------------------------------------------------------------------------
+ grep -E 'server request payload|unary call'
+ grep -Eo '\{.*\}'
+ jq 'del(."grpc.request.content".msg.customResourceUid)'
+ jq 'del(."grpc.request.content".msg.kubeVersion)'
++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator
+ jq 'del(."grpc.start_time")'
+ jq 'del(."grpc.time_ms")'
+ kubectl_bin logs version-service-cr-567f5fd64b-v9sq4 -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.6luNdLRLKc
++ mktemp
+ local LAST_ERR=/tmp/tmp.S22bTAShWd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl logs version-service-cr-567f5fd64b-v9sq4 -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6luNdLRLKc
+ cat /tmp/tmp.S22bTAShWd
+ rm /tmp/tmp.6luNdLRLKc /tmp/tmp.S22bTAShWd
+ return 0
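The telemetry assertion scrapes the version-service pod logs and strips the run-specific fields with jq so the payload can be diffed against a stored expectation; condensed from the pipeline traced above:

vs_pod=$(kubectl get pods --selector=run=version-service-cr \
    -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator)
kubectl logs "$vs_pod" -n psmdb-operator \
    | grep -E 'server request payload|unary call' \
    | grep -Eo '\{.*\}' \
    | jq 'del(."grpc.request.content".msg.customResourceUid)' \
    | jq 'del(."grpc.request.content".msg.kubeVersion)' \
    | jq 'del(."grpc.start_time")' \
    | jq 'del(."grpc.time_ms")'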
+ grep -E 'server request payload|unary call'
+ grep -Eo '\{.*\}'
+ jq 'del(."grpc.request.content".msg.customResourceUid)'
+ jq 'del(."grpc.request.content".msg.kubeVersion)'
++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator
+ jq 'del(."grpc.start_time")'
+ jq 'del(."grpc.time_ms")'
+ kubectl_bin logs version-service-69d6d975b6-dszbq -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.mWwmEkRQOW
++ mktemp
+ local LAST_ERR=/tmp/tmp.CKP20X5UNn
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl logs version-service-69d6d975b6-dszbq -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.mWwmEkRQOW
+ cat /tmp/tmp.CKP20X5UNn
+ rm /tmp/tmp.mWwmEkRQOW /tmp/tmp.CKP20X5UNn
+ return 0
+ local telemetry_log_file=enabled_telemetry.version-service.log-cw.json
+ desc 'telemetry was disabled in CR but in operator not'
+ set +o xtrace
-----------------------------------------------------------------------------------
telemetry was disabled in CR but in operator not
-----------------------------------------------------------------------------------
+ '[' disabled == disabled -a enabled == enabled ']'
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json /dev/fd/63
++ grep -f /tmp/tmp.ihawVbOmIN/enabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json
+ [[ -s /tmp/tmp.ihawVbOmIN/enabled_telemetry.version-service-cr.log.json ]]
+ local telemetry_cr_log_file=enabled_telemetry.version-service-cr.log-cw.json
+ local image_prefix=disabled
+ desc 'telemetry was disabled in operator but not in CR'
+ set +o xtrace
-----------------------------------------------------------------------------------
telemetry was disabled in operator but not in CR
-----------------------------------------------------------------------------------
+ '[' disabled == disabled-recommended -a enabled == disabled ']'
+ desc 'telemetry was disabled in CR as well as in operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
telemetry was disabled in CR as well as in operator
-----------------------------------------------------------------------------------
+ '[' disabled == disabled -a enabled == disabled ']'
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.gdSFQBA1Xj
+++ mktemp
++ local LAST_ERR=/tmp/tmp.ojMNEXJIAN
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.gdSFQBA1Xj
++ cat /tmp/tmp.ojMNEXJIAN
++ rm /tmp/tmp.gdSFQBA1Xj /tmp/tmp.ojMNEXJIAN
++ return 0
+ kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-6cdf47447-g82sh
++ mktemp
+ local LAST_OUT=/tmp/tmp.ub2ozuiWba
++ mktemp
+ local LAST_ERR=/tmp/tmp.UuLQNymXF9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-6cdf47447-g82sh
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ub2ozuiWba
pod "percona-server-mongodb-operator-6cdf47447-g82sh" deleted from psmdb-operator namespace
+ cat /tmp/tmp.UuLQNymXF9
+ rm /tmp/tmp.ub2ozuiWba /tmp/tmp.UuLQNymXF9
+ return 0
'{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.McLFZHOoBG perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.LrbvDpbZze + rm /tmp/tmp.McLFZHOoBG /tmp/tmp.LrbvDpbZze + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.G4gvZ6noAZ ++ mktemp + local LAST_ERR=/tmp/tmp.2poUEeL6lm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G4gvZ6noAZ perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-24327 namespace + cat /tmp/tmp.2poUEeL6lm + rm /tmp/tmp.G4gvZ6noAZ /tmp/tmp.2poUEeL6lm + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.Q8ANToXTrs ++ mktemp + local LAST_ERR=/tmp/tmp.CfL6040yv5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Q8ANToXTrs deployment.apps "psmdb-client" deleted from version-service-24327 namespace + cat /tmp/tmp.CfL6040yv5 + rm /tmp/tmp.Q8ANToXTrs /tmp/tmp.CfL6040yv5 + return 0 + sleep 30 + desc 'disabling telemetry on the operator level' + set +o xtrace ----------------------------------------------------------------------------------- disabling telemetry on the operator level ----------------------------------------------------------------------------------- + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.pOT0AVgBkH ++ mktemp + local LAST_ERR=/tmp/tmp.4kjan7IMZP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pOT0AVgBkH pod "version-service-cr-567f5fd64b-v9sq4" deleted from psmdb-operator namespace + cat /tmp/tmp.4kjan7IMZP + rm /tmp/tmp.pOT0AVgBkH /tmp/tmp.4kjan7IMZP + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.PVBu9pAJ8j ++ mktemp + local LAST_ERR=/tmp/tmp.fWQHXegi2e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PVBu9pAJ8j pod "version-service-69d6d975b6-dszbq" deleted from psmdb-operator namespace + cat /tmp/tmp.fWQHXegi2e + rm /tmp/tmp.PVBu9pAJ8j /tmp/tmp.fWQHXegi2e + return 0 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.RSkzb1rUvB + local LAST_OUT=/tmp/tmp.y1utGR2kNf ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.n1xiFddX5v + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.VlxPLvJjLO + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.y1utGR2kNf + cat /tmp/tmp.n1xiFddX5v + rm /tmp/tmp.y1utGR2kNf 
+ desc 'disabling telemetry on the operator level'
+ set +o xtrace
-----------------------------------------------------------------------------------
disabling telemetry on the operator level
-----------------------------------------------------------------------------------
+ kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.pOT0AVgBkH
++ mktemp
+ local LAST_ERR=/tmp/tmp.4kjan7IMZP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete pod -l run=version-service-cr -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pOT0AVgBkH
pod "version-service-cr-567f5fd64b-v9sq4" deleted from psmdb-operator namespace
+ cat /tmp/tmp.4kjan7IMZP
+ rm /tmp/tmp.pOT0AVgBkH /tmp/tmp.4kjan7IMZP
+ return 0
+ kubectl_bin delete pod -l run=version-service -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.PVBu9pAJ8j
++ mktemp
+ local LAST_ERR=/tmp/tmp.fWQHXegi2e
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete pod -l run=version-service -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.PVBu9pAJ8j
pod "version-service-69d6d975b6-dszbq" deleted from psmdb-operator namespace
+ cat /tmp/tmp.fWQHXegi2e
+ rm /tmp/tmp.PVBu9pAJ8j /tmp/tmp.fWQHXegi2e
+ return 0
+ kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
+ yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.RSkzb1rUvB
+ local LAST_OUT=/tmp/tmp.y1utGR2kNf
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.n1xiFddX5v
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.VlxPLvJjLO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.y1utGR2kNf
+ cat /tmp/tmp.n1xiFddX5v
+ rm /tmp/tmp.y1utGR2kNf /tmp/tmp.n1xiFddX5v
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RSkzb1rUvB
deployment.apps/percona-server-mongodb-operator configured
+ cat /tmp/tmp.VlxPLvJjLO
+ rm /tmp/tmp.RSkzb1rUvB /tmp/tmp.VlxPLvJjLO
+ return 0
++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0
++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0
+++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version'
+++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
+++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0
+++ local 'cli=mongod --version'
+++ local pod_name=26809
+++ kubectl_bin -n default run 26809 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.gjVBuoJgSZ
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.uYkrH1C3wq
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl -n default run 26809 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.gjVBuoJgSZ
+++ cat /tmp/tmp.uYkrH1C3wq
+++ rm /tmp/tmp.gjVBuoJgSZ /tmp/tmp.uYkrH1C3wq
+++ return 0
+++ kubectl_bin -n default wait --for=condition=Ready pod/26809
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.6yvYcP7qA8
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.cM0eWv0kyU
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl -n default wait --for=condition=Ready pod/26809
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.6yvYcP7qA8
+++ cat /tmp/tmp.cM0eWv0kyU
+++ rm /tmp/tmp.6yvYcP7qA8 /tmp/tmp.cM0eWv0kyU
+++ return 0
++++ kubectl_bin -n default exec 26809 -- bash -c 'mongod --version 2>&1'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.Jp1DJidknY
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.5VtXA0C2Fk
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in $(seq 0 2)
++++ set +e
++++ kubectl -n default exec 26809 -- bash -c 'mongod --version 2>&1'
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.Jp1DJidknY
++++ cat /tmp/tmp.5VtXA0C2Fk
++++ rm /tmp/tmp.Jp1DJidknY /tmp/tmp.5VtXA0C2Fk
++++ return 0
+++ local 'output=db version v7.0.24-13
Build Info: {
    "version": "7.0.24-13",
    "gitVersion": "19c9e3cf64fb92e42fc32c2b4eec050db6e03a14",
    "openSSLVersion": "OpenSSL 3.2.2 4 Jun 2024",
    "modules": [],
    "proFeatures": [],
    "allocator": "tcmalloc",
    "environment": {
        "distarch": "x86_64",
        "target_arch": "x86_64"
    }
}'
+++ kubectl_bin -n default delete pod/26809 --grace-period=0 --force
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.UaWQbZFj9B
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.Ou9yKpJ8qf
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in $(seq 0 2)
+++ set +e
+++ kubectl -n default delete pod/26809 --grace-period=0 --force
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.UaWQbZFj9B
+++ cat /tmp/tmp.Ou9yKpJ8qf
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+++ rm /tmp/tmp.UaWQbZFj9B /tmp/tmp.Ou9yKpJ8qf
+++ return 0
+++ echo db version v7.0.24-13 Build Info: '{' '"version":' '"7.0.24-13",' '"gitVersion":' '"19c9e3cf64fb92e42fc32c2b4eec050db6e03a14",' '"openSSLVersion":' '"OpenSSL' 3.2.2 4 Jun '2024",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}'
++ version_info=7.0.24-13
++ [[ ! 7.0.24-13 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]]
++ echo 7.0.24-13
+ ACTUAL_MONGOD_VERSION=7.0.24-13
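get_mongod_ver_from_image runs a throwaway pod from the image, asks mongod for its version, and extracts the "7.0.24-13" string with sed. A condensed sketch of the helper traced above; the pod name and the grep pre-filter are assumptions (the real helper uses a random numeric name and validates the result against the same regex):

image=perconalab/percona-server-mongodb-operator:main-mongod7.0
pod=version-probe-$RANDOM
kubectl -n default run "$pod" --image="$image" --restart=Never --command -- sleep infinity
kubectl -n default wait --for=condition=Ready "pod/$pod"
kubectl -n default exec "$pod" -- bash -c 'mongod --version 2>&1' \
    | grep '^db version' \
    | sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
kubectl -n default delete "pod/$pod" --grace-period=0 --force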
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.YkoKQEPaB7 ++ mktemp + local LAST_ERR=/tmp/tmp.bajvGQDEJL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YkoKQEPaB7 secret/minimal-cluster configured + cat /tmp/tmp.bajvGQDEJL Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.YkoKQEPaB7 /tmp/tmp.bajvGQDEJL + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "7.0-recommended" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cr-minimal.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.L0w5D8rqcS ++ mktemp + local LAST_ERR=/tmp/tmp.YpSUFij6k4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.L0w5D8rqcS perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.YpSUFij6k4 + rm /tmp/tmp.L0w5D8rqcS /tmp/tmp.YpSUFij6k4 + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hwSLURAA97 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TirCQNin3e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hwSLURAA97 ++ cat /tmp/tmp.TirCQNin3e ++ rm /tmp/tmp.hwSLURAA97 /tmp/tmp.TirCQNin3e ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready................OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.9HEOKdo97P +++ mktemp ++ local LAST_ERR=/tmp/tmp.QxPIyA5D9D ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9HEOKdo97P ++ cat /tmp/tmp.QxPIyA5D9D ++ rm /tmp/tmp.9HEOKdo97P /tmp/tmp.QxPIyA5D9D ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XDCoKzYteH +++ mktemp ++ local LAST_ERR=/tmp/tmp.NKJLIsLH1e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XDCoKzYteH ++ cat /tmp/tmp.NKJLIsLH1e ++ rm /tmp/tmp.XDCoKzYteH /tmp/tmp.NKJLIsLH1e ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................. + sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I2rmmtQbtF +++ mktemp ++ local LAST_ERR=/tmp/tmp.9KLPikB3s5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.I2rmmtQbtF ++ cat /tmp/tmp.9KLPikB3s5 ++ rm /tmp/tmp.I2rmmtQbtF /tmp/tmp.9KLPikB3s5 ++ return 0 + local client_container=psmdb-client-66f577db5f-gv9kw + local mongo_flag= + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-gv9kw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tiGAzlR3Rr ++ mktemp + local LAST_ERR=/tmp/tmp.47ERCewQqO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-gv9kw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tiGAzlR3Rr Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("bb7560c3-fd27-46e9-892f-691046a1bf1a") } Percona Server for MongoDB server version: v7.0.5-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.47ERCewQqO + rm /tmp/tmp.tiGAzlR3Rr /tmp/tmp.47ERCewQqO + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-24327 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hiiKKZQvP0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.CE8rbon2tx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hiiKKZQvP0 ++ cat /tmp/tmp.CE8rbon2tx ++ rm /tmp/tmp.hiiKKZQvP0 /tmp/tmp.CE8rbon2tx ++ return 0 + local client_container=psmdb-client-66f577db5f-gv9kw + local mongo_flag= + [[ myApp:myPass@minimal-cluster-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-gv9kw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vthc9GExnm ++ mktemp + local LAST_ERR=/tmp/tmp.b1tMM5399r + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-gv9kw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vthc9GExnm Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9cf208ff-7981-40be-89ed-ebdd9531902c") } Percona Server for MongoDB server version: v7.0.5-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.b1tMM5399r + rm /tmp/tmp.vthc9GExnm /tmp/tmp.b1tMM5399r + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 
'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-567f5fd64b-tw8lz -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Oht6tah2DJ ++ mktemp + local LAST_ERR=/tmp/tmp.N4sDl0JtUJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-567f5fd64b-tw8lz -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Oht6tah2DJ + cat /tmp/tmp.N4sDl0JtUJ + rm /tmp/tmp.Oht6tah2DJ /tmp/tmp.N4sDl0JtUJ + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-69d6d975b6-959sz -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.BhdE2UYR5J ++ mktemp + local LAST_ERR=/tmp/tmp.k9wf5Ljwxw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-69d6d975b6-959sz -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BhdE2UYR5J + cat /tmp/tmp.k9wf5Ljwxw + rm /tmp/tmp.BhdE2UYR5J /tmp/tmp.k9wf5Ljwxw + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' 7.0-recommended == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=7.0 + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' 7.0-recommended == 7.0-recommended -a disabled == disabled ']' + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json /dev/fd/63 ++ grep -f /tmp/tmp.ihawVbOmIN/disabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json + [[ -s /tmp/tmp.ihawVbOmIN/disabled_telemetry.version-service.log.json ]] + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' 7.0-recommended == disabled -a disabled == disabled ']' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z8CYlB2yB4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bUBLFr2Mp2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods 
--selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z8CYlB2yB4 ++ cat /tmp/tmp.bUBLFr2Mp2 ++ rm /tmp/tmp.Z8CYlB2yB4 /tmp/tmp.bUBLFr2Mp2 ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-g796w ++ mktemp + local LAST_OUT=/tmp/tmp.JCv1qmkVKE ++ mktemp + local LAST_ERR=/tmp/tmp.ckLuUAodqM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-g796w + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JCv1qmkVKE pod "percona-server-mongodb-operator-786948844b-g796w" deleted from psmdb-operator namespace + cat /tmp/tmp.ckLuUAodqM + rm /tmp/tmp.JCv1qmkVKE /tmp/tmp.ckLuUAodqM + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.ZbqM1ZkKqP ++ mktemp + local LAST_ERR=/tmp/tmp.BpilRkTbSh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZbqM1ZkKqP perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.BpilRkTbSh + rm /tmp/tmp.ZbqM1ZkKqP /tmp/tmp.BpilRkTbSh + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.i6RevxCVXW ++ mktemp + local LAST_ERR=/tmp/tmp.Tokp3fR2dW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i6RevxCVXW perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-24327 namespace + cat /tmp/tmp.Tokp3fR2dW + rm /tmp/tmp.i6RevxCVXW /tmp/tmp.Tokp3fR2dW + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.rYPODXctDC ++ mktemp + local LAST_ERR=/tmp/tmp.ARZw89OXV7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rYPODXctDC deployment.apps "psmdb-client" deleted from version-service-24327 namespace + cat /tmp/tmp.ARZw89OXV7 + rm /tmp/tmp.rYPODXctDC /tmp/tmp.ARZw89OXV7 + return 0 + sleep 30 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' ++ mktemp + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Do0gtAc91Y ++ mktemp + local LAST_OUT=/tmp/tmp.C222KgaDyR + local LAST_ERR=/tmp/tmp.Q2mlGVYmiq + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.SbiU6FG8Zj + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Do0gtAc91Y + cat /tmp/tmp.Q2mlGVYmiq + rm /tmp/tmp.Do0gtAc91Y /tmp/tmp.Q2mlGVYmiq + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C222KgaDyR 
deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.SbiU6FG8Zj + rm /tmp/tmp.C222KgaDyR /tmp/tmp.SbiU6FG8Zj + return 0 + wait_deployment percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.7WaA15halK ++ mktemp + local LAST_ERR=/tmp/tmp.wY53qZt1VN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7WaA15halK + cat /tmp/tmp.wY53qZt1VN + rm /tmp/tmp.7WaA15halK /tmp/tmp.wY53qZt1VN + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.trCwaFAXpD +++ mktemp ++ local LAST_ERR=/tmp/tmp.NRKe7HhW9p ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.trCwaFAXpD ++ cat /tmp/tmp.NRKe7HhW9p ++ rm /tmp/tmp.trCwaFAXpD /tmp/tmp.NRKe7HhW9p ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.AsKcOvur6s +++ mktemp ++ local LAST_ERR=/tmp/tmp.UohTOX9kKO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AsKcOvur6s ++ cat /tmp/tmp.UohTOX9kKO ++ rm /tmp/tmp.AsKcOvur6s /tmp/tmp.UohTOX9kKO ++ return 0 + '[' 1 == 1 ']' + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ygxLHKOs6P ++ mktemp + local LAST_ERR=/tmp/tmp.p0T7hXadBY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ygxLHKOs6P pod "version-service-cr-567f5fd64b-tw8lz" deleted from psmdb-operator namespace + cat /tmp/tmp.p0T7hXadBY + rm /tmp/tmp.ygxLHKOs6P /tmp/tmp.p0T7hXadBY + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.7LBIymEdfq ++ mktemp + local LAST_ERR=/tmp/tmp.phFCnS7S3p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7LBIymEdfq pod "version-service-69d6d975b6-959sz" deleted from psmdb-operator namespace + cat /tmp/tmp.phFCnS7S3p + rm /tmp/tmp.7LBIymEdfq /tmp/tmp.phFCnS7S3p + return 0 + check_telemetry_transfer http://version-service-cr:11000 disabled disabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=disabled + local telemetry_state=disabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client 
----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.fIqolUuZi6 ++ mktemp + local LAST_ERR=/tmp/tmp.K5hMeFvnj1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fIqolUuZi6 deployment.apps/psmdb-client created + cat /tmp/tmp.K5hMeFvnj1 + rm /tmp/tmp.fIqolUuZi6 /tmp/tmp.K5hMeFvnj1 + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.IdO9sOylwo ++ mktemp + local LAST_ERR=/tmp/tmp.kRJvY9ZfTz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IdO9sOylwo secret/minimal-cluster configured + cat /tmp/tmp.kRJvY9ZfTz Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.IdO9sOylwo /tmp/tmp.kRJvY9ZfTz + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "disabled" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cr-minimal.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2dSh4lc6Rd ++ mktemp + local LAST_ERR=/tmp/tmp.eDTf0IdhYm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2dSh4lc6Rd perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.eDTf0IdhYm + rm /tmp/tmp.2dSh4lc6Rd /tmp/tmp.eDTf0IdhYm + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8IJTlqZAHh +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.U2Ak57RgPJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8IJTlqZAHh ++ cat /tmp/tmp.U2Ak57RgPJ ++ rm /tmp/tmp.8IJTlqZAHh /tmp/tmp.U2Ak57RgPJ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready..............OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i6lJychv3P +++ mktemp ++ local LAST_ERR=/tmp/tmp.JtVEds4RSW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i6lJychv3P ++ cat /tmp/tmp.JtVEds4RSW ++ rm /tmp/tmp.i6lJychv3P /tmp/tmp.JtVEds4RSW ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZnKNW1bQR5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.X8BpFrSu1Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZnKNW1bQR5 ++ cat /tmp/tmp.X8BpFrSu1Y ++ rm /tmp/tmp.ZnKNW1bQR5 /tmp/tmp.X8BpFrSu1Y ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................ 
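The readiness sequence just traced reduces to the sketch below: wait for each replica-set pod, consult the CR to decide whether arbiter, non-voting, or hidden members also need waiting on (all three flags come back empty in this run), then poll until the cluster reports ready. Two simplifications to note: wait_pod's dot-printing poll loop is replaced by a plain kubectl wait, and the final loop is an assumption — the trace hides it behind set +x, so the .status.state polling shown is illustrative rather than the script's exact code.

# Simplified sketch of wait_for_running as expanded above; progress dots and
# member-specific waits are omitted, and the cluster-readiness poll is assumed.
wait_for_running() {
    local name="$1" pod_count="$2"
    local cluster_name="${name%-rs0}" rs_name="rs0"

    for i in $(seq 0 $((pod_count - 1))); do
        kubectl wait --for=condition=Ready "pod/${name}-${i}" --timeout=600s
    done

    # Optional member types are only waited on when enabled in the CR.
    for member in arbiter non_voting hidden; do
        kubectl get psmdb "$cluster_name" \
            -o "jsonpath={.spec.replsets[?(@.name==\"${rs_name}\")].${member}.enabled}" \
            | grep -q '^true$' || continue
        : # member-specific wait would go here; not reached in this run
    done

    # Assumed shape of the loop hidden behind 'set +x' ("Waiting for cluster readyness"):
    until [[ $(kubectl get psmdb "$cluster_name" -o 'jsonpath={.status.state}') == "ready" ]]; do
        sleep 5
    done
}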
+ sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8JWOE1KBNX +++ mktemp ++ local LAST_ERR=/tmp/tmp.YJ487f5K0M ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8JWOE1KBNX ++ cat /tmp/tmp.YJ487f5K0M ++ rm /tmp/tmp.8JWOE1KBNX /tmp/tmp.YJ487f5K0M ++ return 0 + local client_container=psmdb-client-66f577db5f-fzkqx + local mongo_flag= + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-fzkqx -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.cYz0S9Uz5g ++ mktemp + local LAST_ERR=/tmp/tmp.uAI6UusNZD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-fzkqx -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cYz0S9Uz5g Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4399b359-9566-4de5-b30d-4ecdddbbda59") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.uAI6UusNZD + rm /tmp/tmp.cYz0S9Uz5g /tmp/tmp.uAI6UusNZD + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-24327 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.ZOR3SRKxfX +++ mktemp ++ local LAST_ERR=/tmp/tmp.P4o1Ny1hOb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZOR3SRKxfX ++ cat /tmp/tmp.P4o1Ny1hOb ++ rm /tmp/tmp.ZOR3SRKxfX /tmp/tmp.P4o1Ny1hOb ++ return 0 + local client_container=psmdb-client-66f577db5f-fzkqx + local mongo_flag= + [[ myApp:myPass@minimal-cluster-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-fzkqx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UUQSPevkmS ++ mktemp + local LAST_ERR=/tmp/tmp.89AhCKi9Zz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-fzkqx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UUQSPevkmS Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1a1b4191-d462-41a6-9430-ca1eb0ed43f7") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.89AhCKi9Zz + rm /tmp/tmp.UUQSPevkmS /tmp/tmp.89AhCKi9Zz + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + grep -Eo '\{.*\}' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-567f5fd64b-2wgf5 -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.zImt25AGIY ++ mktemp + local LAST_ERR=/tmp/tmp.10Tw0t9dLC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-567f5fd64b-2wgf5 -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zImt25AGIY + cat /tmp/tmp.10Tw0t9dLC + rm /tmp/tmp.zImt25AGIY /tmp/tmp.10Tw0t9dLC + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-69d6d975b6-pzjfz -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.4f0sdDvQwP ++ mktemp + local LAST_ERR=/tmp/tmp.uvbuQBtUCk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 
2) + set +e + kubectl logs version-service-69d6d975b6-pzjfz -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4f0sdDvQwP + cat /tmp/tmp.uvbuQBtUCk + rm /tmp/tmp.4f0sdDvQwP /tmp/tmp.uvbuQBtUCk + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=disabled + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' disabled == disabled-recommended -a disabled == disabled ']' + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == disabled ']' + [[ -s /tmp/tmp.ihawVbOmIN/disabled_telemetry.version-service-cr.log.json ]] + [[ -s /tmp/tmp.ihawVbOmIN/disabled_telemetry.version-service.log.json ]] ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.fGHRcMiawl +++ mktemp ++ local LAST_ERR=/tmp/tmp.hy5qp9gL8X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fGHRcMiawl ++ cat /tmp/tmp.hy5qp9gL8X ++ rm /tmp/tmp.fGHRcMiawl /tmp/tmp.hy5qp9gL8X ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-kfqvd ++ mktemp + local LAST_OUT=/tmp/tmp.jaPwMSGMlx ++ mktemp + local LAST_ERR=/tmp/tmp.GhUTQ3yP54 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-kfqvd + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jaPwMSGMlx pod "percona-server-mongodb-operator-786948844b-kfqvd" deleted from psmdb-operator namespace + cat /tmp/tmp.GhUTQ3yP54 + rm /tmp/tmp.jaPwMSGMlx /tmp/tmp.GhUTQ3yP54 + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.44vVcs7ObI ++ mktemp + local LAST_ERR=/tmp/tmp.l7pIvN43fC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.44vVcs7ObI perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.l7pIvN43fC + rm /tmp/tmp.44vVcs7ObI /tmp/tmp.l7pIvN43fC + return 0 + kubectl_bin 
delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.o9zDdeKDHH ++ mktemp + local LAST_ERR=/tmp/tmp.iGMeIC6VGb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o9zDdeKDHH perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-24327 namespace + cat /tmp/tmp.iGMeIC6VGb + rm /tmp/tmp.o9zDdeKDHH /tmp/tmp.iGMeIC6VGb + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.CND3NWWbYH ++ mktemp + local LAST_ERR=/tmp/tmp.HILDrsrzMr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CND3NWWbYH deployment.apps "psmdb-client" deleted from version-service-24327 namespace + cat /tmp/tmp.HILDrsrzMr + rm /tmp/tmp.CND3NWWbYH /tmp/tmp.HILDrsrzMr + return 0 + sleep 30 + cases=("version-service-exact" "version-service-recommended" "version-service-latest" "version-service-major" "version-service-unreachable") + expected_images=("percona/percona-server-mongodb:6.0.3-2" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:6.0.4-3" "$IMAGE_MONGOD") + for i in "${!cases[@]}" + desc 'test version-service-exact' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-exact ----------------------------------------------------------------------------------- + cluster=version-service-exact + expected_image=percona/percona-server-mongodb:6.0.3-2 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9Eso9YFmTl ++ mktemp + local LAST_ERR=/tmp/tmp.3bNwuTWutI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9Eso9YFmTl secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.3bNwuTWutI + rm /tmp/tmp.9Eso9YFmTl /tmp/tmp.3bNwuTWutI + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.gxBEQvVSSx + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/version-service-exact-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.gxBEQvVSSx + kubectl_bin apply -f - ++ mktemp + local 
LAST_OUT=/tmp/tmp.s7NnDSy0GT ++ mktemp + local LAST_ERR=/tmp/tmp.bqB2kIJuSw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.s7NnDSy0GT perconaservermongodb.psmdb.percona.com/version-service-exact created + cat /tmp/tmp.bqB2kIJuSw + rm /tmp/tmp.s7NnDSy0GT /tmp/tmp.bqB2kIJuSw + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-exact-rs0 3 + local name=version-service-exact-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-exact ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-exact-rs0-0 + local pod=version-service-exact-rs0-0 + set +o xtrace waiting for pod/version-service-exact-rs0-0 to be ready..................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-exact-rs0-1 + local pod=version-service-exact-rs0-1 + set +o xtrace waiting for pod/version-service-exact-rs0-1 to be ready..................OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fwJKEQFFaI +++ mktemp ++ local LAST_ERR=/tmp/tmp.k7w2K1AKdr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fwJKEQFFaI ++ cat /tmp/tmp.k7w2K1AKdr ++ rm /tmp/tmp.fwJKEQFFaI /tmp/tmp.k7w2K1AKdr ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-exact-rs0-2 + local pod=version-service-exact-rs0-2 + set +o xtrace waiting for pod/version-service-exact-rs0-2 to be ready.................OK ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CAAbT0N7yr +++ mktemp ++ local LAST_ERR=/tmp/tmp.OiH8RRNl94 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CAAbT0N7yr ++ cat /tmp/tmp.OiH8RRNl94 ++ rm /tmp/tmp.CAAbT0N7yr /tmp/tmp.OiH8RRNl94 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lnnQpRvk35 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eMh3PEHASg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lnnQpRvk35 ++ cat /tmp/tmp.eMh3PEHASg ++ rm /tmp/tmp.lnnQpRvk35 /tmp/tmp.eMh3PEHASg ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace 
----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1uRYlCSjMv ++ mktemp + local LAST_ERR=/tmp/tmp.NliGaeheO3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1uRYlCSjMv + cat /tmp/tmp.NliGaeheO3 + rm /tmp/tmp.1uRYlCSjMv /tmp/tmp.NliGaeheO3 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-exact-rs0.version-service-24327 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-exact-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ClATksTgHu +++ mktemp ++ local LAST_ERR=/tmp/tmp.JY0oJG6R9u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ClATksTgHu ++ cat /tmp/tmp.JY0oJG6R9u ++ rm /tmp/tmp.ClATksTgHu /tmp/tmp.JY0oJG6R9u ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-exact-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.KSflAzfnUe ++ mktemp + local LAST_ERR=/tmp/tmp.zRvrMNR7P4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KSflAzfnUe Percona Server 
for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-1.version-service-exact-rs0.version-service-24327.svc.cluster.local:27017,version-service-exact-rs0-2.version-service-exact-rs0.version-service-24327.svc.cluster.local:27017,version-service-exact-rs0-0.version-service-exact-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7fb018e1-7ada-4ea5-a069-e9f1307fbbac") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.zRvrMNR7P4 + rm /tmp/tmp.KSflAzfnUe /tmp/tmp.zRvrMNR7P4 + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-exact-rs0.version-service-24327 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-exact-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SasKJSwrmq +++ mktemp ++ local LAST_ERR=/tmp/tmp.141E6ZH0Pn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SasKJSwrmq ++ cat /tmp/tmp.141E6ZH0Pn ++ rm /tmp/tmp.SasKJSwrmq /tmp/tmp.141E6ZH0Pn ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ myApp:myPass@version-service-exact-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bXBjQZh2Sf ++ mktemp + local LAST_ERR=/tmp/tmp.f1wcoBn0YX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bXBjQZh2Sf Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-0.version-service-exact-rs0.version-service-24327.svc.cluster.local:27017,version-service-exact-rs0-1.version-service-exact-rs0.version-service-24327.svc.cluster.local:27017,version-service-exact-rs0-2.version-service-exact-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ae4c398a-c089-445f-a6c5-d1449c7b5163") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat 
/tmp/tmp.f1wcoBn0YX + rm /tmp/tmp.bXBjQZh2Sf /tmp/tmp.f1wcoBn0YX + return 0 + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.QBmv6NcUaF ++ mktemp + local LAST_ERR=/tmp/tmp.1V0I9VzUVx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QBmv6NcUaF + cat /tmp/tmp.1V0I9VzUVx + rm /tmp/tmp.QBmv6NcUaF /tmp/tmp.1V0I9VzUVx + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-exact-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.X1xZgQDPf7 ++ mktemp + local LAST_ERR=/tmp/tmp.MOHLrueFyJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X1xZgQDPf7 perconaservermongodb.psmdb.percona.com "version-service-exact" deleted from version-service-24327 namespace + cat /tmp/tmp.MOHLrueFyJ + rm /tmp/tmp.X1xZgQDPf7 /tmp/tmp.MOHLrueFyJ + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.NMX04JVSgg +++ mktemp ++ local LAST_ERR=/tmp/tmp.VdLrBottNo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NMX04JVSgg ++ cat /tmp/tmp.VdLrBottNo ++ rm /tmp/tmp.NMX04JVSgg /tmp/tmp.VdLrBottNo ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-llk8w pod "percona-server-mongodb-operator-786948844b-llk8w" deleted from psmdb-operator namespace + sleep 10 + for i in 
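The long yq program above is the normalization filter inside compare_kubectl: it deletes everything run-specific from the live StatefulSet (UIDs, timestamps, resourceVersion, image names, config hashes, node ports, status) and rewrites the generated namespace to NAME_SPACE, so the diff -u against the golden file under e2e-tests/version-service/compare/ only fires on real spec drift. Stripped to its skeleton (only a few representative deletions reproduced; tmp_dir is an assumed name for the run's scratch directory, /tmp/tmp.ihawVbOmIN in this run):

    # Abbreviated sketch of compare_kubectl as seen in this trace. The real
    # filter carries roughly 30 del(...) clauses; a handful are shown.
    normalize_filter='
        del(.metadata.managedFields) |
        del(.metadata.resourceVersion) |
        del(.. | select(has("uid")).uid) |
        del(.. | select(has("image")).image) |
        del(.status) |
        (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE")'

    compare_kubectl() {
        local resource=$1                      # e.g. statefulset/version-service-exact-rs0
        local name=$(echo "$resource" | tr '/' '_')
        local expected=${src_dir}/e2e-tests/version-service/compare/${name}.yml
        local actual=${tmp_dir}/${name}.yml
        kubectl get -o yaml "$resource" | yq eval "$normalize_filter" - >"$actual"
        # fields that only exist on newer Kubernetes are removed before diffing
        yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' "$actual"
        diff -u "$expected" "$actual"
    }

Because the filter deletes every image field, a passing diff says nothing about which server version was deployed; that is what the explicit per-pod image check later in each case is for.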
"${!cases[@]}" + desc 'test version-service-recommended' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-recommended ----------------------------------------------------------------------------------- + cluster=version-service-recommended + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.enmqGH0LG7 ++ mktemp + local LAST_ERR=/tmp/tmp.Ay9evAnnFU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.enmqGH0LG7 secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.Ay9evAnnFU Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.enmqGH0LG7 /tmp/tmp.Ay9evAnnFU + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.guDcnvvwNT + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/version-service-recommended-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.guDcnvvwNT + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ULBznepf02 ++ mktemp + local LAST_ERR=/tmp/tmp.kkCmFfJu6J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ULBznepf02 perconaservermongodb.psmdb.percona.com/version-service-recommended created + cat /tmp/tmp.kkCmFfJu6J + rm /tmp/tmp.ULBznepf02 /tmp/tmp.kkCmFfJu6J + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-recommended-rs0 3 + local name=version-service-recommended-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-recommended ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod 
version-service-recommended-rs0-0 + local pod=version-service-recommended-rs0-0 + set +o xtrace waiting for pod/version-service-recommended-rs0-0 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-recommended-rs0-1 + local pod=version-service-recommended-rs0-1 + set +o xtrace waiting for pod/version-service-recommended-rs0-1 to be ready.................OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W10xGLXO90 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BAt267Pvm2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W10xGLXO90 ++ cat /tmp/tmp.BAt267Pvm2 ++ rm /tmp/tmp.W10xGLXO90 /tmp/tmp.BAt267Pvm2 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-recommended-rs0-2 + local pod=version-service-recommended-rs0-2 + set +o xtrace waiting for pod/version-service-recommended-rs0-2 to be ready....................OK ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lJpcB4ho94 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XEyMrl1pcC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lJpcB4ho94 ++ cat /tmp/tmp.XEyMrl1pcC ++ rm /tmp/tmp.lJpcB4ho94 /tmp/tmp.XEyMrl1pcC ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tItabugpyc +++ mktemp ++ local LAST_ERR=/tmp/tmp.C8yB644N25 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tItabugpyc ++ cat /tmp/tmp.C8yB644N25 ++ rm /tmp/tmp.tItabugpyc /tmp/tmp.C8yB644N25 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 + yq eval ' 
del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.GHpYQ5cZcr ++ mktemp + local LAST_ERR=/tmp/tmp.QLm87wthxw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GHpYQ5cZcr + cat /tmp/tmp.QLm87wthxw + rm /tmp/tmp.GHpYQ5cZcr /tmp/tmp.QLm87wthxw + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-24327 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uGfcwAFq7E +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vtl5FRaX8V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uGfcwAFq7E ++ cat /tmp/tmp.Vtl5FRaX8V ++ rm /tmp/tmp.uGfcwAFq7E /tmp/tmp.Vtl5FRaX8V ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.vlgNdlypRF ++ mktemp + local LAST_ERR=/tmp/tmp.HdoBEF7l9a + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 
0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vlgNdlypRF Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-24327.svc.cluster.local:27017,version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-24327.svc.cluster.local:27017,version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b9f866d5-e7c1-4d8f-ba72-67b311843db2") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.HdoBEF7l9a + rm /tmp/tmp.vlgNdlypRF /tmp/tmp.HdoBEF7l9a + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-recommended-rs0.version-service-24327 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-recommended-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GHZQ7JvGNk +++ mktemp ++ local LAST_ERR=/tmp/tmp.P412dQfUVJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GHZQ7JvGNk ++ cat /tmp/tmp.P412dQfUVJ ++ rm /tmp/tmp.GHZQ7JvGNk /tmp/tmp.P412dQfUVJ ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ myApp:myPass@version-service-recommended-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.AYhssJGbPU ++ mktemp + local LAST_ERR=/tmp/tmp.RxVaVPutKG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AYhssJGbPU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-24327.svc.cluster.local:27017,version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-24327.svc.cluster.local:27017,version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c0a6339c-fe1e-444a-93f8-cc9053bafe1b") } 
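The write just issued, like every run_mongo call in this log, follows one pattern: resolve the single psmdb-client pod, then exec the mongo shell inside it against an SRV connection string assembled from user, cluster service, and namespace. A simplified sketch; the real helper also branches on *cfg* URIs for config-server replica sets and accepts extra mongo flags, as the traced locals show:

    # Simplified run_mongo, matching the kubectl exec lines in this trace.
    run_mongo() {
        local command=$1
        local uri=$2                 # user:pass@<cluster>-rs0.<namespace>
        local suffix=.svc.cluster.local
        local client_container
        client_container=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$client_container" -- bash -c \
            "printf '$command\n' | mongo mongodb+srv://$uri$suffix/admin?ssl=false\&replicaSet=rs0"
    }

    # usage, as in the trace:
    # run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-recommended-rs0.version-service-24327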
Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RxVaVPutKG + rm /tmp/tmp.AYhssJGbPU /tmp/tmp.RxVaVPutKG + return 0 + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.REAJJ2RUrL ++ mktemp + local LAST_ERR=/tmp/tmp.aXwMIvjHGs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.REAJJ2RUrL + cat /tmp/tmp.aXwMIvjHGs + rm /tmp/tmp.REAJJ2RUrL /tmp/tmp.aXwMIvjHGs + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-recommended-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.whAFe7BaXt ++ mktemp + local LAST_ERR=/tmp/tmp.7kgVI2ZRE6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.whAFe7BaXt perconaservermongodb.psmdb.percona.com "version-service-recommended" deleted from version-service-24327 namespace + cat /tmp/tmp.7kgVI2ZRE6 + rm /tmp/tmp.whAFe7BaXt /tmp/tmp.7kgVI2ZRE6 + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.43zFAG1Ulo +++ mktemp ++ local LAST_ERR=/tmp/tmp.xCvk4IZsTG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.43zFAG1Ulo ++ cat /tmp/tmp.xCvk4IZsTG ++ rm /tmp/tmp.43zFAG1Ulo /tmp/tmp.xCvk4IZsTG ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-vtbwm 
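The block above is each case's actual assertion plus its teardown: every mongod pod must run the image the version service was expected to resolve, then the psmdb resources are dropped and the operator pod is bounced so the next case starts from a clean reconcile. Pulled out of the trace into compact form (this case's expected value shown):

    # 1) Assert the resolved image on every replica-set pod.
    expected_image=percona/percona-server-mongodb:8.0.4-1-multi   # per-case value
    pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name))
    [ ${#pods[@]} -eq 0 ] && exit 1                               # nothing running is a failure
    for pod in "${pods[@]}"; do
        img=$(kubectl get "$pod" -o 'jsonpath={.spec.containers[0].image}')
        if [ "$img" != "$expected_image" ]; then
            echo "unexpected image $img on $pod" >&2
            exit 1
        fi
    done

    # 2) Tear down between cases: delete the custom resources, then restart
    #    the operator pod so the next case begins with a fresh reconcile loop.
    kubectl delete psmdb --all
    operator_pod=$(kubectl get pods -n psmdb-operator \
        --selector=name=percona-server-mongodb-operator \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl delete pod -n psmdb-operator "$operator_pod"
    sleep 10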
pod "percona-server-mongodb-operator-786948844b-vtbwm" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-latest' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-latest ----------------------------------------------------------------------------------- + cluster=version-service-latest + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.qcuIG56k7U ++ mktemp + local LAST_ERR=/tmp/tmp.aXiKXiyQz0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qcuIG56k7U secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.aXiKXiyQz0 Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.qcuIG56k7U /tmp/tmp.aXiKXiyQz0 + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.Yf9oekRYSJ + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/version-service-latest-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.Yf9oekRYSJ + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.UAIO0Buq5q ++ mktemp + local LAST_ERR=/tmp/tmp.wMdXxp5ybp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UAIO0Buq5q perconaservermongodb.psmdb.percona.com/version-service-latest created + cat /tmp/tmp.wMdXxp5ybp + rm /tmp/tmp.UAIO0Buq5q /tmp/tmp.wMdXxp5ybp + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-latest-rs0 3 + local name=version-service-latest-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-latest ++ seq 0 2 + for 
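The cluster just created above was rendered the way every case in this log is: sed swaps the #initImage placeholder for the PR's operator build, yq pins the operator-side images, and the result is piped straight into kubectl apply. As a standalone sketch (sed and yq expressions copied from the trace; the wrapper name render_and_apply is illustrative):

    # How every test cluster in this log is rendered and applied.
    render_and_apply() {
        local conf=$1    # e.g. .../version-service/conf/version-service-latest-rs0.yml
        local tmp_file
        tmp_file=$(mktemp)
        sed "s%#initImage%perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79%g" "$conf" >"$tmp_file"
        yq eval '
            .spec.backup.enabled = false |
            del(.spec.backup.tasks) |
            .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" |
            .spec.pmm.image = "percona/pmm-client:2.44.1-1" |
            .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' "$tmp_file" \
            | kubectl apply -f -
    }

Note that .spec.image is effectively a placeholder here: the point of the test is that the operator asks the version service for the release matching the requested channel and swaps the image, which is why these pods come up on percona/percona-server-mongodb:8.0.4-1-multi rather than main-mongod7.0.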
i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-latest-rs0-0 + local pod=version-service-latest-rs0-0 + set +o xtrace waiting for pod/version-service-latest-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-latest-rs0-1 + local pod=version-service-latest-rs0-1 + set +o xtrace waiting for pod/version-service-latest-rs0-1 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fRdRsfib2t +++ mktemp ++ local LAST_ERR=/tmp/tmp.S9H1WuG6Ic ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fRdRsfib2t ++ cat /tmp/tmp.S9H1WuG6Ic ++ rm /tmp/tmp.fRdRsfib2t /tmp/tmp.S9H1WuG6Ic ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-latest-rs0-2 + local pod=version-service-latest-rs0-2 + set +o xtrace waiting for pod/version-service-latest-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6KF5pq7VMr +++ mktemp ++ local LAST_ERR=/tmp/tmp.fmt5i6vwtD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6KF5pq7VMr ++ cat /tmp/tmp.fmt5i6vwtD ++ rm /tmp/tmp.6KF5pq7VMr /tmp/tmp.fmt5i6vwtD ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AR2pDbEqNy +++ mktemp ++ local LAST_ERR=/tmp/tmp.tPozayfb18 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AR2pDbEqNy ++ cat /tmp/tmp.tPozayfb18 ++ rm /tmp/tmp.AR2pDbEqNy /tmp/tmp.tPozayfb18 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ODZnt5oPVw ++ mktemp + local LAST_ERR=/tmp/tmp.gw2cp3scfm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ODZnt5oPVw + cat /tmp/tmp.gw2cp3scfm + rm /tmp/tmp.ODZnt5oPVw /tmp/tmp.gw2cp3scfm + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-latest-rs0.version-service-24327 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-latest-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bm5zbtgDh8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.j9e4D7DY9L ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bm5zbtgDh8 ++ cat /tmp/tmp.j9e4D7DY9L ++ rm /tmp/tmp.Bm5zbtgDh8 /tmp/tmp.j9e4D7DY9L ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-latest-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.3xxSSFQO8D ++ mktemp + local LAST_ERR=/tmp/tmp.Hmf3OvuCMb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3xxSSFQO8D 
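version_gt, which runs after each statefulset dump, gates deletions of spec fields that only newer Kubernetes servers populate. The echoed '1.30 >= 1.22' piped into bc -l implies an implementation like the following, where KUBE_VERSION is an assumed name for wherever the harness keeps the detected server version (1.30 on this cluster):

    # bc -l prints 1 when the comparison holds, 0 otherwise.
    version_gt() {
        [ "$(echo "${KUBE_VERSION} >= $1" | bc -l)" -eq 1 ]
    }

    # usage, mirroring the trace: only prune the field on servers that emit it
    version_gt 1.22 && yq -i eval 'del(.spec.internalTrafficPolicy)' "$new_result"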
Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-2.version-service-latest-rs0.version-service-24327.svc.cluster.local:27017,version-service-latest-rs0-0.version-service-latest-rs0.version-service-24327.svc.cluster.local:27017,version-service-latest-rs0-1.version-service-latest-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("96c28077-fe5c-4194-9b13-abf24b2997cc") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.Hmf3OvuCMb + rm /tmp/tmp.3xxSSFQO8D /tmp/tmp.Hmf3OvuCMb + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-latest-rs0.version-service-24327 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-latest-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3FpRip4c8S +++ mktemp ++ local LAST_ERR=/tmp/tmp.HyHpI3zSPw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3FpRip4c8S ++ cat /tmp/tmp.HyHpI3zSPw ++ rm /tmp/tmp.3FpRip4c8S /tmp/tmp.HyHpI3zSPw ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ myApp:myPass@version-service-latest-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.twKVK2zxif ++ mktemp + local LAST_ERR=/tmp/tmp.B5CHuMz93k + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.twKVK2zxif Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-0.version-service-latest-rs0.version-service-24327.svc.cluster.local:27017,version-service-latest-rs0-1.version-service-latest-rs0.version-service-24327.svc.cluster.local:27017,version-service-latest-rs0-2.version-service-latest-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0cef449d-7967-4666-93dd-4df36be10042") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ 
"nInserted" : 1 }) bye + cat /tmp/tmp.B5CHuMz93k + rm /tmp/tmp.twKVK2zxif /tmp/tmp.B5CHuMz93k + return 0 + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.6xJ8RUJNoL ++ mktemp + local LAST_ERR=/tmp/tmp.VwxuSfCNKY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6xJ8RUJNoL + cat /tmp/tmp.VwxuSfCNKY + rm /tmp/tmp.6xJ8RUJNoL /tmp/tmp.VwxuSfCNKY + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-latest-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.5mLFVn3O2H ++ mktemp + local LAST_ERR=/tmp/tmp.ifbRngX0Ps + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5mLFVn3O2H perconaservermongodb.psmdb.percona.com "version-service-latest" deleted from version-service-24327 namespace + cat /tmp/tmp.ifbRngX0Ps + rm /tmp/tmp.5mLFVn3O2H /tmp/tmp.ifbRngX0Ps + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.WYwwZyylnb +++ mktemp ++ local LAST_ERR=/tmp/tmp.GWz3qAFmWe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WYwwZyylnb ++ cat /tmp/tmp.GWz3qAFmWe ++ rm /tmp/tmp.WYwwZyylnb /tmp/tmp.GWz3qAFmWe ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-vj69v pod "percona-server-mongodb-operator-786948844b-vj69v" 
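What separates the four cases in this log (exact resolves to 6.0.3-2, recommended and latest to 8.0.4-1-multi, and the major case that follows to 6.0.4-3) is the upgrade channel each conf file requests from the version service. The conf files themselves never appear in the log, but in the PSMDB CRD the channel lives under spec.upgradeOptions, so a hypothetical inspection of the major case's template would be:

    # Hypothetical: the field path follows the PSMDB CRD's upgradeOptions
    # section; the actual contents of this conf file are not shown in the log.
    yq eval '.spec.upgradeOptions' \
        e2e-tests/version-service/conf/version-service-major-rs0.yml
    # A major channel such as "6.0-recommended" would explain why this case
    # resolves to percona/percona-server-mongodb:6.0.4-3 while the recommended
    # and latest channels resolve to 8.0.4-1-multi.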
deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-major' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-major ----------------------------------------------------------------------------------- + cluster=version-service-major + expected_image=percona/percona-server-mongodb:6.0.4-3 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BAQvhOWovw ++ mktemp + local LAST_ERR=/tmp/tmp.gXFaCWNmXm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BAQvhOWovw secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.gXFaCWNmXm Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.BAQvhOWovw /tmp/tmp.gXFaCWNmXm + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.nu7fwxHgbo + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/version-service-major-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.nu7fwxHgbo + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.UtTVCbl81q ++ mktemp + local LAST_ERR=/tmp/tmp.RZIFd8eeb3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UtTVCbl81q perconaservermongodb.psmdb.percona.com/version-service-major created + cat /tmp/tmp.RZIFd8eeb3 + rm /tmp/tmp.UtTVCbl81q /tmp/tmp.RZIFd8eeb3 + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-major-rs0 3 + local name=version-service-major-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-major ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod 
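The wait_for_running call the trace just entered fans out into wait_pod for each of the three replica-set members, then reads the arbiter, non_voting, and hidden flags off the CR. The dotted "waiting for pod/... to be ready" progress lines that follow come from a poll loop along these lines; the Ready-condition jsonpath and the retry cap are inferred, since the log only shows the dots:

    # Inferred shape of wait_pod: poll the pod's Ready condition, one dot
    # per attempt. The jsonpath and the 60-attempt cap are assumptions.
    wait_pod() {
        local pod=$1 retry=0
        echo -n "waiting for pod/${pod} to be ready"
        until [ "$(kubectl get "pod/$pod" \
            -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}')" = "True" ]; do
            echo -n .
            retry=$((retry + 1))
            if [ "$retry" -ge 60 ]; then
                echo " pod/$pod did not become ready" >&2
                return 1
            fi
            sleep 1
        done
        echo OK
    }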
version-service-major-rs0-0 + local pod=version-service-major-rs0-0 + set +o xtrace waiting for pod/version-service-major-rs0-0 to be ready.................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-major-rs0-1 + local pod=version-service-major-rs0-1 + set +o xtrace waiting for pod/version-service-major-rs0-1 to be ready.................OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E2LKLvfkIX +++ mktemp ++ local LAST_ERR=/tmp/tmp.KKih29V3yJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E2LKLvfkIX ++ cat /tmp/tmp.KKih29V3yJ ++ rm /tmp/tmp.E2LKLvfkIX /tmp/tmp.KKih29V3yJ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-major-rs0-2 + local pod=version-service-major-rs0-2 + set +o xtrace waiting for pod/version-service-major-rs0-2 to be ready...................OK ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rQpslji3JU +++ mktemp ++ local LAST_ERR=/tmp/tmp.jAvnLzNOd7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rQpslji3JU ++ cat /tmp/tmp.jAvnLzNOd7 ++ rm /tmp/tmp.rQpslji3JU /tmp/tmp.jAvnLzNOd7 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DtyjOrpdCO +++ mktemp ++ local LAST_ERR=/tmp/tmp.UoLkPznUM5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DtyjOrpdCO ++ cat /tmp/tmp.UoLkPznUM5 ++ rm /tmp/tmp.DtyjOrpdCO /tmp/tmp.UoLkPznUM5 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-major-rs0 + local resource=statefulset/version-service-major-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-major-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.52sqJepUTn ++ mktemp + local LAST_ERR=/tmp/tmp.AoJYWa5nlw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.52sqJepUTn + cat /tmp/tmp.AoJYWa5nlw + rm /tmp/tmp.52sqJepUTn /tmp/tmp.AoJYWa5nlw + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-major-rs0.version-service-24327 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-major-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FLYCxZEh68 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qXdk2hyWFv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FLYCxZEh68 ++ cat /tmp/tmp.qXdk2hyWFv ++ rm /tmp/tmp.FLYCxZEh68 /tmp/tmp.qXdk2hyWFv ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-major-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.w7DXCCVzaz ++ mktemp + local LAST_ERR=/tmp/tmp.isSylO88Hw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.w7DXCCVzaz Percona Server 
for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-2.version-service-major-rs0.version-service-24327.svc.cluster.local:27017,version-service-major-rs0-0.version-service-major-rs0.version-service-24327.svc.cluster.local:27017,version-service-major-rs0-1.version-service-major-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("5d188bbe-746a-446e-b86d-c2eb9f63b254") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.isSylO88Hw + rm /tmp/tmp.w7DXCCVzaz /tmp/tmp.isSylO88Hw + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-major-rs0.version-service-24327 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-major-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5z3Jbcibrj +++ mktemp ++ local LAST_ERR=/tmp/tmp.GyCqcFsJDA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5z3Jbcibrj ++ cat /tmp/tmp.GyCqcFsJDA ++ rm /tmp/tmp.5z3Jbcibrj /tmp/tmp.GyCqcFsJDA ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ myApp:myPass@version-service-major-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.NXSbbye3Es ++ mktemp + local LAST_ERR=/tmp/tmp.xzPYjpGVDD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NXSbbye3Es Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-0.version-service-major-rs0.version-service-24327.svc.cluster.local:27017,version-service-major-rs0-1.version-service-major-rs0.version-service-24327.svc.cluster.local:27017,version-service-major-rs0-2.version-service-major-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a3ecde35-dbdf-493e-b273-9fb18cf4a149") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye
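
Both the createUser call and the insert above go through the same run_mongo helper. Its visible core: resolve the psmdb-client pod by label, then kubectl exec a mongo shell inside it with an SRV connection string assembled from the user@host URI. A minimal sketch with the argument handling trimmed; quoting is simplified relative to the real helper, which also takes the driver and DNS suffix as parameters and switches the replica set name for *cfg* URIs:

    run_mongo() {
        local command="$1" uri="$2"
        local driver="mongodb+srv" suffix=".svc.cluster.local" replica_set="rs0"
        local client
        client=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$client" -- bash -c \
            "printf '$command\n' | mongo $driver://$uri$suffix/admin?ssl=false\&replicaSet=$replica_set"
    }

    # the write seen above, for example:
    run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-major-rs0.version-service-24327

The trace then finishes the insert and re-runs the StatefulSet comparison:

+ cat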
/tmp/tmp.xzPYjpGVDD + rm /tmp/tmp.NXSbbye3Es /tmp/tmp.xzPYjpGVDD + return 0 + compare_kubectl statefulset/version-service-major-rs0 + local resource=statefulset/version-service-major-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-major-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ZyIPZP7309 ++ mktemp + local LAST_ERR=/tmp/tmp.GeO5FtWQId + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZyIPZP7309 + cat /tmp/tmp.GeO5FtWQId + rm /tmp/tmp.ZyIPZP7309 /tmp/tmp.GeO5FtWQId + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-major-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.e6ktIJgZjc ++ mktemp + local LAST_ERR=/tmp/tmp.yC9FWgv9YG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e6ktIJgZjc perconaservermongodb.psmdb.percona.com "version-service-major" deleted from version-service-24327 namespace + cat /tmp/tmp.yC9FWgv9YG + rm /tmp/tmp.e6ktIJgZjc /tmp/tmp.yC9FWgv9YG + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.lyoCU8f1xz +++ mktemp ++ local LAST_ERR=/tmp/tmp.4iaX3JMsAM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lyoCU8f1xz ++ cat /tmp/tmp.4iaX3JMsAM ++ rm /tmp/tmp.lyoCU8f1xz /tmp/tmp.4iaX3JMsAM ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-r7t8v pod "percona-server-mongodb-operator-786948844b-r7t8v" deleted from psmdb-operator namespace + sleep 10 + for i in 
"${!cases[@]}" + desc 'test version-service-unreachable' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-unreachable ----------------------------------------------------------------------------------- + cluster=version-service-unreachable + expected_image=perconalab/percona-server-mongodb-operator:main-mongod7.0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.PEBLDeJ31h ++ mktemp + local LAST_ERR=/tmp/tmp.OksO2lOZX1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PEBLDeJ31h secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.OksO2lOZX1 Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.PEBLDeJ31h /tmp/tmp.OksO2lOZX1 + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.7ArLCf0d5I + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-2045-4cbffc79%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/conf/version-service-unreachable-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.7ArLCf0d5I + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.mIs0lpjiE9 ++ mktemp + local LAST_ERR=/tmp/tmp.cAJIiXIt8d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mIs0lpjiE9 perconaservermongodb.psmdb.percona.com/version-service-unreachable created + cat /tmp/tmp.cAJIiXIt8d + rm /tmp/tmp.mIs0lpjiE9 /tmp/tmp.cAJIiXIt8d + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-unreachable-rs0 3 + local name=version-service-unreachable-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-unreachable ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod 
version-service-unreachable-rs0-0 + local pod=version-service-unreachable-rs0-0 + set +o xtrace waiting for pod/version-service-unreachable-rs0-0 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-unreachable-rs0-1 + local pod=version-service-unreachable-rs0-1 + set +o xtrace waiting for pod/version-service-unreachable-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FN5pSsBWCm +++ mktemp ++ local LAST_ERR=/tmp/tmp.rgf0hHWKIK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FN5pSsBWCm ++ cat /tmp/tmp.rgf0hHWKIK ++ rm /tmp/tmp.FN5pSsBWCm /tmp/tmp.rgf0hHWKIK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-unreachable-rs0-2 + local pod=version-service-unreachable-rs0-2 + set +o xtrace waiting for pod/version-service-unreachable-rs0-2 to be ready.................OK ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yREgHsI1wm +++ mktemp ++ local LAST_ERR=/tmp/tmp.bASmnB4leT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yREgHsI1wm ++ cat /tmp/tmp.bASmnB4leT ++ rm /tmp/tmp.yREgHsI1wm /tmp/tmp.bASmnB4leT ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ePzWyXrCxh +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qe094eCick ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ePzWyXrCxh ++ cat /tmp/tmp.Qe094eCick ++ rm /tmp/tmp.ePzWyXrCxh /tmp/tmp.Qe094eCick ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0
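
The dotted progress lines come from wait_pod, which hides its polling loop behind set +o xtrace and prints one dot per attempt. A plausible reconstruction, assuming a one-second poll against the container ready flag (the real helper may also cap the number of retries):

    wait_pod() {
        local pod="$1"
        echo -n "waiting for pod/$pod to be ready"
        until [ "$(kubectl get "pod/$pod" \
                -o 'jsonpath={.status.containerStatuses[0].ready}' 2>/dev/null)" = "true" ]; do
            echo -n .
            sleep 1
        done
        echo OK
    }

The StatefulSet comparison then repeats for this cluster:

+ yq eval '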
del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.SHSxcPE53F ++ mktemp + local LAST_ERR=/tmp/tmp.MYyLhrAuDh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SHSxcPE53F + cat /tmp/tmp.MYyLhrAuDh + rm /tmp/tmp.SHSxcPE53F /tmp/tmp.MYyLhrAuDh + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-24327 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1ZmVxkCjKT +++ mktemp ++ local LAST_ERR=/tmp/tmp.GZo5Bu60ae ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1ZmVxkCjKT ++ cat /tmp/tmp.GZo5Bu60ae ++ rm /tmp/tmp.1ZmVxkCjKT /tmp/tmp.GZo5Bu60ae ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.E5XWt0Ze0b ++ mktemp + local LAST_ERR=/tmp/tmp.kXQnLejdfb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 
0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E5XWt0Ze0b Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-24327.svc.cluster.local:27017,version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-24327.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("696dd24c-feaf-40d3-b264-40f0c0945911") } Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.kXQnLejdfb + rm /tmp/tmp.E5XWt0Ze0b /tmp/tmp.kXQnLejdfb + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-unreachable-rs0.version-service-24327 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-unreachable-rs0.version-service-24327 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1tSpf3FAuh +++ mktemp ++ local LAST_ERR=/tmp/tmp.NXoRB8n0ju ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1tSpf3FAuh ++ cat /tmp/tmp.NXoRB8n0ju ++ rm /tmp/tmp.1tSpf3FAuh /tmp/tmp.NXoRB8n0ju ++ return 0 + local client_container=psmdb-client-66f577db5f-r86zv + local mongo_flag= + [[ myApp:myPass@version-service-unreachable-rs0.version-service-24327 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.ReWUZxnYmt ++ mktemp + local LAST_ERR=/tmp/tmp.RWEkjWeiKp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-66f577db5f-r86zv -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-24327.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ReWUZxnYmt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-24327.svc.cluster.local:27017,version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-24327.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-24327.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("aac88e86-2561-4420-9bf4-8d83fca8d8fb") 
} Percona Server for MongoDB server version: v7.0.24-13 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.RWEkjWeiKp + rm /tmp/tmp.ReWUZxnYmt /tmp/tmp.RWEkjWeiKp + return 0 + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local new_result=/tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-24327", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.9VT47yy6dY ++ mktemp + local LAST_ERR=/tmp/tmp.oQdl2B69Vf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9VT47yy6dY + cat /tmp/tmp.oQdl2B69Vf + rm /tmp/tmp.9VT47yy6dY /tmp/tmp.oQdl2B69Vf + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.ihawVbOmIN/statefulset_version-service-unreachable-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod7.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod7.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod7.0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod7.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod7.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod7.0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod7.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod7.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod7.0 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.u2pEax9QoA ++ mktemp + local LAST_ERR=/tmp/tmp.ypqJepnOU4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.u2pEax9QoA perconaservermongodb.psmdb.percona.com "version-service-unreachable" deleted from version-service-24327 namespace + cat /tmp/tmp.ypqJepnOU4 + rm /tmp/tmp.u2pEax9QoA /tmp/tmp.ypqJepnOU4 + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.mvy2Ju4urW +++ mktemp ++ local LAST_ERR=/tmp/tmp.4QuRK1Qqpk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mvy2Ju4urW ++ cat /tmp/tmp.4QuRK1Qqpk ++ rm /tmp/tmp.mvy2Ju4urW 
/tmp/tmp.4QuRK1Qqpk ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-786948844b-rz2l9 pod "percona-server-mongodb-operator-786948844b-rz2l9" deleted from psmdb-operator namespace + sleep 10 + destroy version-service-24327 + local namespace=version-service-24327 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.v5AFqMXpR4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8AELEoKKiQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v5AFqMXpR4 ++ cat /tmp/tmp.8AELEoKKiQ No resources found in version-service-24327 namespace. ++ rm /tmp/tmp.v5AFqMXpR4 /tmp/tmp.8AELEoKKiQ ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.5cS7Sbi7Go ++ mktemp + local LAST_ERR=/tmp/tmp.nbuFjffO2V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5cS7Sbi7Go customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.nbuFjffO2V + rm /tmp/tmp.5cS7Sbi7Go /tmp/tmp.nbuFjffO2V + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.NvQkB7dLko ++ mktemp + local 
LAST_ERR=/tmp/tmp.Nl9W8X47ME + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NvQkB7dLko + cat /tmp/tmp.Nl9W8X47ME + rm /tmp/tmp.NvQkB7dLko /tmp/tmp.Nl9W8X47ME + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.9Qx6cdfNQE ++ mktemp + local LAST_ERR=/tmp/tmp.KLIDTzlZXf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9Qx6cdfNQE + cat /tmp/tmp.KLIDTzlZXf + rm /tmp/tmp.9Qx6cdfNQE /tmp/tmp.KLIDTzlZXf + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.nq7HqXDRFz ++ mktemp + local LAST_ERR=/tmp/tmp.MqXbMXhPnd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nq7HqXDRFz + cat /tmp/tmp.MqXbMXhPnd + rm /tmp/tmp.nq7HqXDRFz /tmp/tmp.MqXbMXhPnd + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.R7tCWZpBwN ++ mktemp + local LAST_ERR=/tmp/tmp.oqzYbog1RT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2045/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R7tCWZpBwN clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.oqzYbog1RT + rm /tmp/tmp.R7tCWZpBwN /tmp/tmp.oqzYbog1RT + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp
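
The delete_crd sequence above explains both the "resource type not found" errors and the stray-backslash warnings. For each CRD named in deploy/crd.yaml it first clears finalizers on any leftover custom resources, so deletion cannot hang on a finalizer that no operator is left to process, and then waits for the CRD itself to disappear; the bare ':' after each failed patch swallows the error once the CRDs are already gone. The grep warnings come from quoting the YAML document separator as '\-\-\-'; grep -v -e '---' expresses the same filter without them. In outline:

    for crd_name in $(yq eval '.metadata.name' deploy/crd.yaml | grep -v -e '---'); do
        # strip finalizers from every remaining object of this type, namespace by namespace
        kubectl get "$crd_name" --all-namespaces -o wide | grep -v NAMESPACE \
            | xargs -L 1 sh -xc "kubectl patch $crd_name -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :
        kubectl wait --for=delete crd "$crd_name"
    done

The cert-manager teardown continues; the NotFound errors below simply mean cert-manager was not present on this cluster:

+ local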
LAST_OUT=/tmp/tmp.v5LlZPF7Yw ++ mktemp + local LAST_ERR=/tmp/tmp.EMj4Sm9op9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.v5LlZPF7Yw + cat /tmp/tmp.EMj4Sm9op9 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.v5LlZPF7Yw + cat /tmp/tmp.EMj4Sm9op9 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.v5LlZPF7Yw + cat /tmp/tmp.EMj4Sm9op9 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not 
+ sleep 8
+ cat /tmp/tmp.v5LlZPF7Yw
+ cat /tmp/tmp.EMj4Sm9op9
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.v5LlZPF7Yw /tmp/tmp.EMj4Sm9op9 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace version-service-24327 + rm -rf /tmp/tmp.ihawVbOmIN + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.JZiIcBS14K + local LAST_OUT=/tmp/tmp.HNY7IxSWEZ ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.DZYuZ6zRse + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.0ZuBjBDdMg + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace version-service-24327 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator