Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/logs/version-service.log
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.33) and server (1.30) exceeds the supported minor version skew of +/-1
+ create_infra version-service-7403
+ local ns=version-service-7403
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.4aUdTr3DIV
++ mktemp
+ local LAST_ERR=/tmp/tmp.1IaOCi0F60
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.4aUdTr3DIV
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.1IaOCi0F60
+ rm /tmp/tmp.4aUdTr3DIV /tmp/tmp.1IaOCi0F60
+ return 0
++ grep -v '\-\-\-'
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.av73O3CFSa
++ mktemp
+ local LAST_ERR=/tmp/tmp.xdLpDIpvOq
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.av73O3CFSa
+ cat /tmp/tmp.xdLpDIpvOq
+ rm /tmp/tmp.av73O3CFSa /tmp/tmp.xdLpDIpvOq
+ return 0
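Note: the xargs/patch step above is a teardown guard: any leftover perconaservermongodbbackups objects would block CRD deletion through their finalizers, so each one has its finalizer list emptied first. The "error: the server doesn't have a resource type" lines are the expected no-op case (the CRD is already gone) and are absorbed by the `+ :` that follows; with empty input GNU xargs still runs the command once, which is why the traced patch shows the odd `-n sh` ($0 falls back to "sh"). A condensed sketch of the same pattern, where the wide listing feeds the namespace ($0) and object name ($1) into each patch:

    kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
      | grep -v NAMESPACE \
      | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'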
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.OZsFXXIjcG ++ mktemp + local LAST_ERR=/tmp/tmp.E7BbqYSMpS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OZsFXXIjcG + cat /tmp/tmp.E7BbqYSMpS + rm /tmp/tmp.OZsFXXIjcG /tmp/tmp.E7BbqYSMpS + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.fA9dgy7rzA ++ mktemp + local LAST_ERR=/tmp/tmp.Lye6iqIMyQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fA9dgy7rzA + cat /tmp/tmp.Lye6iqIMyQ + rm /tmp/tmp.fA9dgy7rzA /tmp/tmp.Lye6iqIMyQ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.RTIHtZwCZj ++ mktemp + local LAST_ERR=/tmp/tmp.De0vAZkmJx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RTIHtZwCZj clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.De0vAZkmJx + rm /tmp/tmp.RTIHtZwCZj /tmp/tmp.De0vAZkmJx + return 0 + check_crd_for_deletion PR-1855-bb3d32e4 + local git_tag=PR-1855-bb3d32e4 ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1855-bb3d32e4/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eBPSqWsdJe +++ mktemp ++ local LAST_ERR=/tmp/tmp.Vs25dTWSC3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.eBPSqWsdJe ++ cat /tmp/tmp.Vs25dTWSC3 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
+ check_crd_for_deletion PR-1855-bb3d32e4
+ local git_tag=PR-1855-bb3d32e4
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1855-bb3d32e4/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/bin/sed s/---//g
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.eBPSqWsdJe
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Vs25dTWSC3
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.eBPSqWsdJe
++ cat /tmp/tmp.Vs25dTWSC3
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.eBPSqWsdJe
++ cat /tmp/tmp.Vs25dTWSC3
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.eBPSqWsdJe
++ cat /tmp/tmp.Vs25dTWSC3
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.eBPSqWsdJe
++ cat /tmp/tmp.Vs25dTWSC3
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.eBPSqWsdJe /tmp/tmp.Vs25dTWSC3
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
++ tail -n1
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep validate-auth
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh.org
++ kubectl get crd
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrolebinding
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ kubectl get clusterrole
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
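Note: the mktemp/LAST_OUT/LAST_ERR/seq 0 2 lines that repeat throughout this log are the trace of the test suite's kubectl_bin wrapper, and the crd/null probe above shows its failure path: three attempts with sleeps of 0, 4 and 8 seconds, then the last exit status propagated. A hypothetical condensed re-implementation (not the suite's exact code):

    kubectl_bin() {
        # capture stdout/stderr per attempt; retry up to 3 times with growing backoff
        local out err rc=0
        out=$(mktemp); err=$(mktemp)
        for i in 0 1 2; do
            if kubectl "$@" >"$out" 2>"$err"; then
                cat "$out" "$err"; rm -f "$out" "$err"; return 0
            else
                rc=$?            # exit status of the failed kubectl call
                sleep $((i * 4)) # 0s, 4s, 8s -- matches the sleeps traced above
            fi
        done
        cat "$out" "$err"
        rm -f "$out" "$err"
        return "$rc"
    }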
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ kubectl_bin get ns
++ mktemp
+ local LAST_OUT=/tmp/tmp.uzHx7TE7fk
++ mktemp
+ awk '{print$1}'
+ xargs kubectl delete ns
+ local LAST_OUT=/tmp/tmp.KRU6KphHnU
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.YAZe29ulIN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ local LAST_ERR=/tmp/tmp.zWsbnibiL9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.KRU6KphHnU
+ cat /tmp/tmp.YAZe29ulIN
+ rm /tmp/tmp.KRU6KphHnU /tmp/tmp.YAZe29ulIN
+ return 0
namespace "gke-managed-cim" deleted
namespace "gke-managed-system" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
namespace "version-service-11843" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.uzHx7TE7fk
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.zWsbnibiL9
+ rm /tmp/tmp.uzHx7TE7fk /tmp/tmp.zWsbnibiL9
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.XDxXPqfUuK
++ mktemp
+ local LAST_ERR=/tmp/tmp.kw8glLRWK9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.XDxXPqfUuK
+ cat /tmp/tmp.kw8glLRWK9
+ rm /tmp/tmp.XDxXPqfUuK /tmp/tmp.kw8glLRWK9
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.pGjw48KcJN
++ mktemp
+ local LAST_ERR=/tmp/tmp.1BvJx7bb6W
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pGjw48KcJN
namespace/psmdb-operator created
+ cat /tmp/tmp.1BvJx7bb6W
+ rm /tmp/tmp.pGjw48KcJN /tmp/tmp.1BvJx7bb6W
+ return 0
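Note: two kubectl_bin invocations run concurrently here, which is why their traces interleave: one deletes the target namespace directly, the other sweeps every non-system namespace left over from earlier runs (hence the gke-managed/gmp/version-service-11843 deletions). The sweep, as a one-pipeline sketch:

    kubectl get ns \
      | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
      | awk '{print$1}' \
      | xargs kubectl delete ns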
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.qxhfAmF173
+++ mktemp
++ local LAST_ERR=/tmp/tmp.0yrPOGTFuz
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.qxhfAmF173
++ cat /tmp/tmp.0yrPOGTFuz
++ rm /tmp/tmp.qxhfAmF173 /tmp/tmp.0yrPOGTFuz
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster6 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.NOKZO2CIWY
++ mktemp
+ local LAST_ERR=/tmp/tmp.lQm54V32KH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster6 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.NOKZO2CIWY
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster6" modified.
+ cat /tmp/tmp.lQm54V32KH
+ rm /tmp/tmp.NOKZO2CIWY /tmp/tmp.lQm54V32KH
+ return 0
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.7hXD2FRXlb
++ mktemp
+ local LAST_ERR=/tmp/tmp.ac24DcwwhX
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7hXD2FRXlb
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.ac24DcwwhX
+ rm /tmp/tmp.7hXD2FRXlb /tmp/tmp.ac24DcwwhX
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.Rty6ZsLD7m
++ mktemp
+ local LAST_ERR=/tmp/tmp.7EcasQNj4x
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Rty6ZsLD7m
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.7EcasQNj4x
+ rm /tmp/tmp.Rty6ZsLD7m /tmp/tmp.7EcasQNj4x
+ return 0
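Note: the CRDs are applied with --server-side, likely because these CRDs are large (client-side apply would have to fit the whole object into the last-applied-configuration annotation), and --force-conflicts takes over field ownership from whatever manager applied them in a previous run. Minimal form:

    kubectl apply --server-side --force-conflicts -f deploy/crd.yaml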
+ kubectl_bin apply -f -
+ yq eval '
  (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4") |
  ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
  ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-operator.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.CFlSgMDeQy
++ mktemp
+ local LAST_ERR=/tmp/tmp.W2u2D06WbH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.CFlSgMDeQy
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.W2u2D06WbH
+ rm /tmp/tmp.CFlSgMDeQy /tmp/tmp.W2u2D06WbH
+ return 0
+ sleep 2
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.kl1zJEkWIu
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Q5GA9sEsdT
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.kl1zJEkWIu
++ cat /tmp/tmp.Q5GA9sEsdT
++ rm /tmp/tmp.kl1zJEkWIu /tmp/tmp.Q5GA9sEsdT
++ return 0
+ wait_pod percona-server-mongodb-operator-79f5d4dcd6-d2d6s
+ local pod=percona-server-mongodb-operator-79f5d4dcd6-d2d6s
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-79f5d4dcd6-d2d6s to be ready.OK
+ echo 'Print operator info from log'
Print operator info from log
+ grep 'Manager starting up'
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.aDlAPTkGhy
+++ mktemp
++ local LAST_ERR=/tmp/tmp.9vm0zSKCLV
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.aDlAPTkGhy
++ cat /tmp/tmp.9vm0zSKCLV
++ rm /tmp/tmp.aDlAPTkGhy /tmp/tmp.9vm0zSKCLV
++ return 0
+ kubectl_bin logs percona-server-mongodb-operator-79f5d4dcd6-d2d6s
++ mktemp
+ local LAST_OUT=/tmp/tmp.AJHcvKcgGr
++ mktemp
+ local LAST_ERR=/tmp/tmp.Qnyls5O1kG
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl logs percona-server-mongodb-operator-79f5d4dcd6-d2d6s
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.AJHcvKcgGr
+ cat /tmp/tmp.Qnyls5O1kG
+ rm /tmp/tmp.AJHcvKcgGr /tmp/tmp.Qnyls5O1kG
+ return 0
2025-05-28T17:45:33.769Z INFO setup Manager starting up {"gitCommit": "bb3d32e498b3a92c096b809e60928f73eda8778f", "gitBranch": "PR-1855-bb3d32e4", "buildTime": "", "goVersion": "go1.24.3", "os": "linux", "arch": "amd64"}
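Note: the operator manifest is rewritten in-stream before it reaches kubectl. In the yq v4 program above, `(.. | select(.[] == "DISABLE_TELEMETRY"))` matches any node containing the value "DISABLE_TELEMETRY" (i.e. the env entry whose name is DISABLE_TELEMETRY) and `|= .value="..."` rewrites its value field. The whole pipeline, condensed:

    yq eval '
      (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4") |
      ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
      ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' deploy/cw-operator.yaml \
      | kubectl apply -f -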
+ create_namespace version-service-7403
+ local namespace=version-service-7403
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ awk '{print $1}'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get ValidatingWebhookConfiguration
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep validate-auth
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl api-resources
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get clusterrolebinding
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl get clusterrole
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces version-service-7403'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces version-service-7403
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace version-service-7403 --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.trAZVyz2OI
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ kubectl_bin get ns
+ awk '{print$1}'
+ xargs kubectl delete ns
++ mktemp
+ local LAST_ERR=/tmp/tmp.1r12vJFONX
+ local exit_status=0
+ local timeout=4
++ mktemp
+ local LAST_OUT=/tmp/tmp.P4ckVXKO8p
++ seq 0 2
++ mktemp
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace version-service-7403 --ignore-not-found
+ local LAST_ERR=/tmp/tmp.bjn9dnkBky
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.P4ckVXKO8p
+ cat /tmp/tmp.bjn9dnkBky
+ rm /tmp/tmp.P4ckVXKO8p /tmp/tmp.bjn9dnkBky
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.trAZVyz2OI
+ cat /tmp/tmp.1r12vJFONX
+ rm /tmp/tmp.trAZVyz2OI /tmp/tmp.1r12vJFONX
+ return 0
+ kubectl_bin wait --for=delete namespace version-service-7403
++ mktemp
+ local LAST_OUT=/tmp/tmp.79cDXmss8J
++ mktemp
+ local LAST_ERR=/tmp/tmp.ggSyQ3l0HN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace version-service-7403
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.79cDXmss8J
+ cat /tmp/tmp.ggSyQ3l0HN
+ rm /tmp/tmp.79cDXmss8J /tmp/tmp.ggSyQ3l0HN
+ return 0
+ desc 'create namespace version-service-7403'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace version-service-7403
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace version-service-7403
++ mktemp
+ local LAST_OUT=/tmp/tmp.mwsfoXlL04
++ mktemp
+ local LAST_ERR=/tmp/tmp.qwFSZWD8Gg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace version-service-7403
namespace "gke-managed-cim" deleted
namespace "gke-managed-system" deleted
namespace "gmp-public" deleted
namespace "gmp-system" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.mwsfoXlL04
namespace/version-service-7403 created
+ cat /tmp/tmp.qwFSZWD8Gg
+ rm /tmp/tmp.mwsfoXlL04 /tmp/tmp.qwFSZWD8Gg
+ return 0
+ set_kube_ctx version-service-7403
+ local namespace=version-service-7403
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.gOFeBb8E5l
+++ mktemp
++ local LAST_ERR=/tmp/tmp.eKiRR4oo07
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.gOFeBb8E5l
++ cat /tmp/tmp.eKiRR4oo07
++ rm /tmp/tmp.gOFeBb8E5l /tmp/tmp.eKiRR4oo07
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster6 --namespace=version-service-7403
++ mktemp
+ local LAST_OUT=/tmp/tmp.V5ApmK5dpo
++ mktemp
+ local LAST_ERR=/tmp/tmp.DFo6vRXhBo
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster6 --namespace=version-service-7403
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.V5ApmK5dpo
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1855-bb3d32e4-4-cluster6" modified.
+ cat /tmp/tmp.DFo6vRXhBo
+ rm /tmp/tmp.V5ApmK5dpo /tmp/tmp.DFo6vRXhBo
+ return 0
+ desc 'install version service'
+ set +o xtrace
-----------------------------------------------------------------------------------
install version service
-----------------------------------------------------------------------------------
+ kubectl_bin create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json
++ mktemp
+ local LAST_OUT=/tmp/tmp.zgebi7feOv
++ mktemp
+ local LAST_ERR=/tmp/tmp.DKiyOhHbds
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zgebi7feOv
configmap/versions created
+ cat /tmp/tmp.DKiyOhHbds
+ rm /tmp/tmp.zgebi7feOv /tmp/tmp.DKiyOhHbds
+ return 0
+ kubectl_bin apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/vs.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.9ry4CaFkr2
++ mktemp
+ local LAST_ERR=/tmp/tmp.BArkHa0fDj
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/vs.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.9ry4CaFkr2
deployment.apps/version-service created
service/version-service created
+ cat /tmp/tmp.BArkHa0fDj
+ rm /tmp/tmp.9ry4CaFkr2 /tmp/tmp.BArkHa0fDj
+ return 0
+ sleep 10
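Note: this is the heart of the test fixture: the canned operator.9.9.9.* JSON answers are packed into a ConfigMap named versions, and vs.yml (not shown in this log) deploys a mock version service that serves them back to the operator. Condensed, with paths abbreviated:

    kubectl create configmap -n psmdb-operator versions \
      --from-file e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json \
      --from-file e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json
    kubectl apply -n psmdb-operator -f e2e-tests/version-service/conf/vs.yml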
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.M40lW4BwY7
++ mktemp
+ local LAST_ERR=/tmp/tmp.q0s5y7qOoQ
+ local exit_status=0
+ local timeout=4
+ yq eval '(.. | select(tag == "!!str")) |= sub("version-service$", "version-service-cr")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/vs.yml
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.M40lW4BwY7
deployment.apps/version-service-cr created
service/version-service-cr created
+ cat /tmp/tmp.q0s5y7qOoQ
+ rm /tmp/tmp.M40lW4BwY7 /tmp/tmp.q0s5y7qOoQ
+ return 0
+ kubectl_bin -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000
++ mktemp
+ local LAST_OUT=/tmp/tmp.hvnrbL5zIp
++ mktemp
+ local LAST_ERR=/tmp/tmp.D3XsgdrRls
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hvnrbL5zIp
deployment.apps/percona-server-mongodb-operator env updated
+ cat /tmp/tmp.D3XsgdrRls
+ rm /tmp/tmp.hvnrbL5zIp /tmp/tmp.D3XsgdrRls
+ return 0
+ sleep 30
+ desc 'enable telemetry on operator level'
+ set +o xtrace
-----------------------------------------------------------------------------------
enable telemetry on operator level
-----------------------------------------------------------------------------------
+ yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "false"'
+ kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.Y3xUSU9DrE
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.VrefXhUpNx
+ local LAST_ERR=/tmp/tmp.ZyZJrzwEmI
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
++ mktemp
+ local LAST_ERR=/tmp/tmp.77VCWbCApP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Y3xUSU9DrE
+ cat /tmp/tmp.ZyZJrzwEmI
+ rm /tmp/tmp.Y3xUSU9DrE /tmp/tmp.ZyZJrzwEmI
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.VrefXhUpNx
deployment.apps/percona-server-mongodb-operator configured
+ cat /tmp/tmp.77VCWbCApP
+ rm /tmp/tmp.VrefXhUpNx /tmp/tmp.77VCWbCApP
+ return 0
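Note: two different ways of changing a Deployment's environment are used back to back: `kubectl set env` for the fallback URI, and a get | yq | apply round-trip for the telemetry flag. Side by side:

    # imperative one-liner
    kubectl -n psmdb-operator set env deploy/percona-server-mongodb-operator \
      PERCONA_VS_FALLBACK_URI=http://version-service:11000
    # declarative round-trip (yq reads the manifest from stdin)
    kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator \
      | yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "false"' \
      | kubectl apply -n psmdb-operator -f -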
+ wait_deployment percona-server-mongodb-operator
+ local name=percona-server-mongodb-operator
+ sleep 10
+ retry=0
+ echo -n percona-server-mongodb-operator
percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.AqFRx8pV3o
++ mktemp
+ local LAST_ERR=/tmp/tmp.ybfpoFCqgf
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get deployment percona-server-mongodb-operator -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.AqFRx8pV3o
+ cat /tmp/tmp.ybfpoFCqgf
+ rm /tmp/tmp.AqFRx8pV3o /tmp/tmp.ybfpoFCqgf
+ return 0
++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ZnC3XhUGdu
+++ mktemp
++ local LAST_ERR=/tmp/tmp.wdTzgvYbuO
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ZnC3XhUGdu
++ cat /tmp/tmp.wdTzgvYbuO
++ rm /tmp/tmp.ZnC3XhUGdu /tmp/tmp.wdTzgvYbuO
++ return 0
++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.MZiDtgeXPd
+++ mktemp
++ local LAST_ERR=/tmp/tmp.LaZil0Wfxb
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.MZiDtgeXPd
++ cat /tmp/tmp.LaZil0Wfxb
++ rm /tmp/tmp.MZiDtgeXPd /tmp/tmp.LaZil0Wfxb
++ return 0
+ '[' 1 == 1 ']'
+ check_telemetry_transfer http://version-service-cr:11000 disabled enabled
+ local cr_vs_uri=http://version-service-cr:11000
+ local cr_vs_channel=disabled
+ local telemetry_state=enabled
+ cluster=minimal-cluster
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.wlXTZ1qJ6F
++ mktemp
+ local LAST_ERR=/tmp/tmp.6rfSOxh3dQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wlXTZ1qJ6F
deployment.apps/psmdb-client created
+ cat /tmp/tmp.6rfSOxh3dQ
+ rm /tmp/tmp.wlXTZ1qJ6F /tmp/tmp.6rfSOxh3dQ
+ return 0
+ kubectl_bin apply -f -
+ yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.7j7uk9yobs
++ mktemp
+ local LAST_ERR=/tmp/tmp.rPD5UmtA9D
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7j7uk9yobs
secret/minimal-cluster created
+ cat /tmp/tmp.rPD5UmtA9D
+ rm /tmp/tmp.7j7uk9yobs /tmp/tmp.rPD5UmtA9D
+ return 0
+ desc 'create PSMDB minimal cluster minimal-cluster'
+ set +o xtrace
-----------------------------------------------------------------------------------
create PSMDB minimal cluster minimal-cluster
-----------------------------------------------------------------------------------
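Note: the yq program in the next step pins every image and points the cluster at the mock service; crVersion 9.9.9 is what makes the operator ask the version service for the matching operator.9.9.9.* entries, and apply selects the upgrade channel under test (disabled here, 7.0-recommended later). An abbreviated sketch of the resulting spec (not the full cr-minimal.yaml):

    apiVersion: psmdb.percona.com/v1
    kind: PerconaServerMongoDB
    metadata:
      name: minimal-cluster
    spec:
      crVersion: "9.9.9"
      image: perconalab/percona-server-mongodb-operator:main-mongod7.0
      upgradeOptions:
        versionServiceEndpoint: http://version-service-cr:11000
        apply: disabled
      backup:
        enabled: false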
+ yq eval '
  .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" |
  .spec.upgradeOptions.apply = "disabled" |
  .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4" |
  .spec.crVersion = "9.9.9" |
  .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" |
  .spec.pmm.image = "perconalab/pmm-client:dev-latest" |
  .spec.backup.enabled = false |
  .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cr-minimal.yaml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.zNHUAyAeKW
++ mktemp
+ local LAST_ERR=/tmp/tmp.xz0ixnKfxr
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zNHUAyAeKW
perconaservermongodb.psmdb.percona.com/minimal-cluster created
+ cat /tmp/tmp.xz0ixnKfxr
+ rm /tmp/tmp.zNHUAyAeKW /tmp/tmp.xz0ixnKfxr
+ return 0
+ desc 'check if Pod is started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if Pod is started
-----------------------------------------------------------------------------------
+ wait_for_running minimal-cluster-rs0 1
+ local name=minimal-cluster-rs0
+ let last_pod=0
+ :
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=minimal-cluster
++ seq 0 0
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 0 ]]
++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.E08rmaG7xC
+++ mktemp
++ local LAST_ERR=/tmp/tmp.3yVYIiQOKK
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.E08rmaG7xC
++ cat /tmp/tmp.3yVYIiQOKK
++ rm /tmp/tmp.E08rmaG7xC /tmp/tmp.3yVYIiQOKK
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod minimal-cluster-rs0-0
+ local pod=minimal-cluster-rs0-0
+ set +o xtrace
waiting for pod/minimal-cluster-rs0-0 to be ready.................OK
++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.NabsHHqhTc
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Unqx3v60Jh
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.NabsHHqhTc
++ cat /tmp/tmp.Unqx3v60Jh
++ rm /tmp/tmp.NabsHHqhTc /tmp/tmp.Unqx3v60Jh
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.SSvgtSzOuk
+++ mktemp
++ local LAST_ERR=/tmp/tmp.S535GWo7bC
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.SSvgtSzOuk
++ cat /tmp/tmp.S535GWo7bC
++ rm /tmp/tmp.SSvgtSzOuk /tmp/tmp.S535GWo7bC
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness............
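Note: the wait_for_running checks read per-replset flags straight off the custom resource with JSONPath filter expressions; an empty result (as here) simply means the field is unset. For example:

    kubectl get psmdb minimal-cluster \
      -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'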
+ sleep 20
+ desc 'create user myApp'
+ set +o xtrace
-----------------------------------------------------------------------------------
create user myApp
-----------------------------------------------------------------------------------
+ run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403
+ local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})'
+ local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403
+ local driver=mongodb+srv
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.FY4cXQFREX
+++ mktemp
++ local LAST_ERR=/tmp/tmp.SqdDGMTuQ9
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.FY4cXQFREX
++ cat /tmp/tmp.SqdDGMTuQ9
++ rm /tmp/tmp.FY4cXQFREX /tmp/tmp.SqdDGMTuQ9
++ return 0
+ local client_container=psmdb-client-66f577db5f-txktc
+ local mongo_flag=
+ [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-66f577db5f-txktc -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
++ mktemp
+ local LAST_OUT=/tmp/tmp.pVWEgclnQz
++ mktemp
+ local LAST_ERR=/tmp/tmp.BYpqw2TjCu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-66f577db5f-txktc -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.pVWEgclnQz
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("5b1d2df5-bdf1-4d58-a6d9-4b72c1f65597") }
Percona Server for MongoDB server version: v7.0.18-11
WARNING: shell and server versions do not match
Successfully added user: {
    "user" : "myApp",
    "roles" : [
        {
            "db" : "myApp",
            "role" : "readWrite"
        }
    ]
}
bye
+ cat /tmp/tmp.BYpqw2TjCu
+ rm /tmp/tmp.pVWEgclnQz /tmp/tmp.BYpqw2TjCu
+ return 0
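Note: run_mongo never talks to the database from the test host; it resolves the psmdb-client pod and pipes a script into the mongo shell inside it, over an SRV connection string (the \& keeps the ampersand literal inside the exec'd command line). The pattern, with a hypothetical read instead of createUser:

    client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client" -- bash -c \
      'printf "use myApp\n db.test.find()\n" | mongo "mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false&replicaSet=rs0"'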
+ desc 'write data, read from all'
+ set +o xtrace
-----------------------------------------------------------------------------------
write data, read from all
-----------------------------------------------------------------------------------
+ run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-7403
+ local 'command=use myApp\n db.test.insert({ x: 100500 })'
+ local uri=myApp:myPass@minimal-cluster-rs0.version-service-7403
+ local driver=mongodb+srv
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.qWSZnYmjgd
+++ mktemp
++ local LAST_ERR=/tmp/tmp.45GrSkS0H7
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.qWSZnYmjgd
++ cat /tmp/tmp.45GrSkS0H7
++ rm /tmp/tmp.qWSZnYmjgd /tmp/tmp.45GrSkS0H7
++ return 0
+ local client_container=psmdb-client-66f577db5f-txktc
+ local mongo_flag=
+ [[ myApp:myPass@minimal-cluster-rs0.version-service-7403 == *cfg* ]]
+ replica_set=rs0
+ kubectl_bin exec psmdb-client-66f577db5f-txktc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
++ mktemp
+ local LAST_OUT=/tmp/tmp.tF2zO9eZPY
++ mktemp
+ local LAST_ERR=/tmp/tmp.oC6Du9k2pa
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-66f577db5f-txktc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 '
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.tF2zO9eZPY
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false
Implicit session: session { "id" : UUID("6d153538-d5a1-48c7-95ca-7f6274603b30") }
Percona Server for MongoDB server version: v7.0.18-11
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.oC6Du9k2pa
+ rm /tmp/tmp.tF2zO9eZPY /tmp/tmp.oC6Du9k2pa
+ return 0
+ desc 'check telemetry'
+ set +o xtrace
-----------------------------------------------------------------------------------
check telemetry
-----------------------------------------------------------------------------------
+ jq 'del(."grpc.time_ms")'
+ jq 'del(."grpc.start_time")'
+ jq 'del(."grpc.request.content".msg.kubeVersion)'
+ jq 'del(."grpc.request.content".msg.customResourceUid)'
+ grep -E 'server request payload|unary call'
++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator
+ grep -Eo '\{.*\}'
+ kubectl_bin logs version-service-cr-567f5fd64b-kl6s2 -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.yrX20Wm3KZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.y8sZN37Cg6
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl logs version-service-cr-567f5fd64b-kl6s2 -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.yrX20Wm3KZ
+ cat /tmp/tmp.y8sZN37Cg6
+ rm /tmp/tmp.yrX20Wm3KZ /tmp/tmp.y8sZN37Cg6
+ return 0
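Note: before the telemetry payloads can be compared against the golden files, the version-service logs are normalized: only gRPC payload lines are kept, the JSON object is extracted, and run-specific fields are stripped. Condensed, with $vs_pod standing in for the pod name looked up above:

    kubectl logs "$vs_pod" -n psmdb-operator \
      | grep -E 'server request payload|unary call' \
      | grep -Eo '\{.*\}' \
      | jq 'del(."grpc.time_ms")
            | del(."grpc.start_time")
            | del(."grpc.request.content".msg.kubeVersion)
            | del(."grpc.request.content".msg.customResourceUid)'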
+ jq 'del(."grpc.start_time")'
+ jq 'del(."grpc.request.content".msg.kubeVersion)'
+ grep -E 'server request payload|unary call'
++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator
+ grep -Eo '\{.*\}'
+ jq 'del(."grpc.request.content".msg.customResourceUid)'
+ jq 'del(."grpc.time_ms")'
+ kubectl_bin logs version-service-69d6d975b6-lg2xv -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.bS57s1SwiG
++ mktemp
+ local LAST_ERR=/tmp/tmp.DyyRXAgWEC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl logs version-service-69d6d975b6-lg2xv -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bS57s1SwiG
+ cat /tmp/tmp.DyyRXAgWEC
+ rm /tmp/tmp.bS57s1SwiG /tmp/tmp.DyyRXAgWEC
+ return 0
+ local telemetry_log_file=enabled_telemetry.version-service.log-cw.json
+ desc 'telemetry was disabled in CR but in operator not'
+ set +o xtrace
-----------------------------------------------------------------------------------
telemetry was disabled in CR but in operator not
-----------------------------------------------------------------------------------
+ '[' disabled == disabled -a enabled == enabled ']'
+ diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json /dev/fd/63
++ grep -f /tmp/tmp.2WaJ9ND5BX/enabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json
+ [[ -s /tmp/tmp.2WaJ9ND5BX/enabled_telemetry.version-service-cr.log.json ]]
+ local telemetry_cr_log_file=enabled_telemetry.version-service-cr.log-cw.json
+ local image_prefix=disabled
+ desc 'telemetry was disabled in operator but not in CR'
+ set +o xtrace
-----------------------------------------------------------------------------------
telemetry was disabled in operator but not in CR
-----------------------------------------------------------------------------------
+ '[' disabled == disabled-recommended -a enabled == disabled ']'
+ desc 'telemetry was disabled in CR as well as in operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
telemetry was disabled in CR as well as in operator
-----------------------------------------------------------------------------------
+ '[' disabled == disabled -a enabled == disabled ']'
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.2FawWfZPEY
+++ mktemp
++ local LAST_ERR=/tmp/tmp.oQJtVTuvaW
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.2FawWfZPEY
++ cat /tmp/tmp.oQJtVTuvaW
++ rm /tmp/tmp.2FawWfZPEY /tmp/tmp.oQJtVTuvaW
++ return 0
+ kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-7b87685c84-cphd2
++ mktemp
+ local LAST_OUT=/tmp/tmp.NNgYCLDa7r
++ mktemp
+ local LAST_ERR=/tmp/tmp.C2F5FgePsT
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-7b87685c84-cphd2
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.NNgYCLDa7r
pod "percona-server-mongodb-operator-7b87685c84-cphd2" deleted
+ cat /tmp/tmp.C2F5FgePsT
+ rm /tmp/tmp.NNgYCLDa7r /tmp/tmp.C2F5FgePsT
+ return 0
'{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nG2zZmOGa7 perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.5nBL3nJHvh + rm /tmp/tmp.nG2zZmOGa7 /tmp/tmp.5nBL3nJHvh + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.c0ETMn84fL ++ mktemp + local LAST_ERR=/tmp/tmp.omwgKSWyBZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c0ETMn84fL perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted + cat /tmp/tmp.omwgKSWyBZ + rm /tmp/tmp.c0ETMn84fL /tmp/tmp.omwgKSWyBZ + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.k6DqrVoU68 ++ mktemp + local LAST_ERR=/tmp/tmp.JXsY4PvvX1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k6DqrVoU68 deployment.apps "psmdb-client" deleted + cat /tmp/tmp.JXsY4PvvX1 + rm /tmp/tmp.k6DqrVoU68 /tmp/tmp.JXsY4PvvX1 + return 0 + sleep 30 + desc 'disabling telemetry on the operator level' + set +o xtrace ----------------------------------------------------------------------------------- disabling telemetry on the operator level ----------------------------------------------------------------------------------- + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.zSDZPx3OJC ++ mktemp + local LAST_ERR=/tmp/tmp.2DZlP1CcUH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zSDZPx3OJC pod "version-service-cr-567f5fd64b-kl6s2" deleted + cat /tmp/tmp.2DZlP1CcUH + rm /tmp/tmp.zSDZPx3OJC /tmp/tmp.2DZlP1CcUH + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.isnegUCFuk ++ mktemp + local LAST_ERR=/tmp/tmp.quWTdjcDeQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.isnegUCFuk pod "version-service-69d6d975b6-lg2xv" deleted + cat /tmp/tmp.quWTdjcDeQ + rm /tmp/tmp.isnegUCFuk /tmp/tmp.quWTdjcDeQ + return 0 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + local LAST_OUT=/tmp/tmp.d3940b5YP3 + local LAST_OUT=/tmp/tmp.DEEGtonYt7 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.rWRNwFq3LH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + local LAST_ERR=/tmp/tmp.ToOMlaNOYk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DEEGtonYt7 + cat /tmp/tmp.rWRNwFq3LH + rm /tmp/tmp.DEEGtonYt7 /tmp/tmp.rWRNwFq3LH + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d3940b5YP3 
+ kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
++ mktemp
+ yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"'
+ local LAST_OUT=/tmp/tmp.d3940b5YP3
+ local LAST_OUT=/tmp/tmp.DEEGtonYt7
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.rWRNwFq3LH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator
+ local LAST_ERR=/tmp/tmp.ToOMlaNOYk
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.DEEGtonYt7
+ cat /tmp/tmp.rWRNwFq3LH
+ rm /tmp/tmp.DEEGtonYt7 /tmp/tmp.rWRNwFq3LH
+ return 0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.d3940b5YP3
deployment.apps/percona-server-mongodb-operator configured
+ cat /tmp/tmp.ToOMlaNOYk
+ rm /tmp/tmp.d3940b5YP3 /tmp/tmp.ToOMlaNOYk
+ return 0
++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod7.0
++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0
+++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod7.0 'mongod --version'
+++ /usr/bin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
+++ local image=perconalab/percona-server-mongodb-operator:main-mongod7.0
+++ local 'cli=mongod --version'
+++ local pod_name=15873
+++ kubectl_bin -n default run 15873 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.oLic37Dt51
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.CIsx1t2ZMx
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default run 15873 --image=perconalab/percona-server-mongodb-operator:main-mongod7.0 --restart=Never --command -- sleep infinity
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.oLic37Dt51
+++ cat /tmp/tmp.CIsx1t2ZMx
+++ rm /tmp/tmp.oLic37Dt51 /tmp/tmp.CIsx1t2ZMx
+++ return 0
+++ kubectl_bin -n default wait --for=condition=Ready pod/15873
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.DGKomILEZ2
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.bbVgA51Oln
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default wait --for=condition=Ready pod/15873
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.DGKomILEZ2
+++ cat /tmp/tmp.bbVgA51Oln
+++ rm /tmp/tmp.DGKomILEZ2 /tmp/tmp.bbVgA51Oln
+++ return 0
++++ kubectl_bin -n default exec 15873 -- bash -c 'mongod --version 2>&1'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.rzDWaOo0wE
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.2gCpQjmSOC
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in '$(seq 0 2)'
++++ set +e
++++ kubectl -n default exec 15873 -- bash -c 'mongod --version 2>&1'
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.rzDWaOo0wE
++++ cat /tmp/tmp.2gCpQjmSOC
++++ rm /tmp/tmp.rzDWaOo0wE /tmp/tmp.2gCpQjmSOC
++++ return 0
+++ local 'output=db version v7.0.18-11
Build Info: {
    "version": "7.0.18-11",
    "gitVersion": "97ceec31da21050a3f6f3b203bac9e5a2685dcd2",
    "openSSLVersion": "OpenSSL 3.2.2 4 Jun 2024",
    "modules": [],
    "proFeatures": [],
    "allocator": "tcmalloc",
    "environment": {
        "distarch": "x86_64",
        "target_arch": "x86_64"
    }
}'
+++ kubectl_bin -n default delete pod/15873 --grace-period=0 --force
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.6fdeQtwjGY
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.4APdJ9M8Q7
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl -n default delete pod/15873 --grace-period=0 --force
+++ exit_status=0
+++ set -e
+++ '[' 0 '!=' 0 -a -n 1 ']'
+++ break
+++ cat /tmp/tmp.6fdeQtwjGY
+++ cat /tmp/tmp.4APdJ9M8Q7
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+++ rm /tmp/tmp.6fdeQtwjGY /tmp/tmp.4APdJ9M8Q7
+++ return 0
+++ echo db version v7.0.18-11 Build Info: '{' '"version":' '"7.0.18-11",' '"gitVersion":' '"97ceec31da21050a3f6f3b203bac9e5a2685dcd2",' '"openSSLVersion":' '"OpenSSL' 3.2.2 4 Jun '2024",' '"modules":' '[],' '"proFeatures":' '[],' '"allocator":' '"tcmalloc",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}'
++ version_info=7.0.18-11
++ [[ ! 7.0.18-11 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]]
++ echo 7.0.18-11
+ ACTUAL_MONGOD_VERSION=7.0.18-11
+ wait_deployment percona-server-mongodb-operator
+ local name=percona-server-mongodb-operator
+ sleep 10
+ retry=0
+ echo -n percona-server-mongodb-operator
percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.W0HBaMANQI
++ mktemp
+ local LAST_ERR=/tmp/tmp.qlcgP8tNcQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get deployment percona-server-mongodb-operator -n psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.W0HBaMANQI
+ cat /tmp/tmp.qlcgP8tNcQ
+ rm /tmp/tmp.W0HBaMANQI /tmp/tmp.qlcgP8tNcQ
+ return 0
++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.GjwmgNdW92
+++ mktemp
++ local LAST_ERR=/tmp/tmp.GsljaFJMl1
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.GjwmgNdW92
++ cat /tmp/tmp.GsljaFJMl1
++ rm /tmp/tmp.GjwmgNdW92 /tmp/tmp.GsljaFJMl1
++ return 0
++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Rqb4AxmXf4
+++ mktemp
++ local LAST_ERR=/tmp/tmp.g8EjeAJ7cj
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Rqb4AxmXf4
++ cat /tmp/tmp.g8EjeAJ7cj
++ rm /tmp/tmp.Rqb4AxmXf4 /tmp/tmp.g8EjeAJ7cj
++ return 0
+ '[' 1 == 1 ']'
+ check_telemetry_transfer http://version-service-cr:11000 7.0-recommended disabled
+ local cr_vs_uri=http://version-service-cr:11000
+ local cr_vs_channel=7.0-recommended
+ local telemetry_state=disabled
+ cluster=minimal-cluster
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.ayw9etmEI5
++ mktemp
+ local LAST_ERR=/tmp/tmp.uos0ASMAfB
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ayw9etmEI5
deployment.apps/psmdb-client created
+ cat /tmp/tmp.uos0ASMAfB
+ rm /tmp/tmp.ayw9etmEI5 /tmp/tmp.uos0ASMAfB
+ return 0
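Note: get_mongod_ver_from_image needs nothing but the image name: it starts a throwaway pod that just sleeps, execs `mongod --version` in it, extracts the version with sed, and force-deletes the pod (hence the "Immediate deletion" warning above); the helper then sanity-checks the result against ^([0-9]+\.){2}[0-9]+-[0-9]+$. A sketch with a hypothetical pod name in place of the random 15873:

    kubectl -n default run ver-probe --image="$image" --restart=Never --command -- sleep infinity
    kubectl -n default wait --for=condition=Ready pod/ver-probe
    kubectl -n default exec ver-probe -- bash -c 'mongod --version 2>&1' \
      | sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g'
    kubectl -n default delete pod/ver-probe --grace-period=0 --force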
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JaQ6X9dkuH ++ mktemp + local LAST_ERR=/tmp/tmp.ISi9SF26sW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JaQ6X9dkuH secret/minimal-cluster configured + cat /tmp/tmp.ISi9SF26sW Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.JaQ6X9dkuH /tmp/tmp.ISi9SF26sW + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "7.0-recommended" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | .spec.backup.enabled = false | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cr-minimal.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.WIXzHQgZWV ++ mktemp + local LAST_ERR=/tmp/tmp.COFqpkU7Iv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WIXzHQgZWV perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.COFqpkU7Iv + rm /tmp/tmp.WIXzHQgZWV /tmp/tmp.COFqpkU7Iv + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LnLM67BUP6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.s62si8hjc0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LnLM67BUP6 ++ cat /tmp/tmp.s62si8hjc0 ++ rm /tmp/tmp.LnLM67BUP6 /tmp/tmp.s62si8hjc0 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready................OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local 
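[editor's note] Before applying the minimal CR, the harness pipes the manifest through `yq` to point `upgradeOptions` at the in-cluster version service, select the update channel, and pin the test images. A reduced sketch of that pattern, with values taken from the trace (the relative file path is a placeholder):

```bash
# Sketch: patch a PSMDB custom resource on the fly and apply it.
yq eval '
  .metadata.name = "minimal-cluster" |
  .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" |
  .spec.upgradeOptions.apply = "7.0-recommended" |
  .spec.crVersion = "9.9.9" |
  .spec.backup.enabled = false
' deploy/cr-minimal.yaml | kubectl apply -f -
```

Keeping the manifest pristine and editing it in the pipe means the same source file can serve every channel scenario in the test.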
LAST_OUT=/tmp/tmp.pIdvWBFM79 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rMflXtlgoj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pIdvWBFM79 ++ cat /tmp/tmp.rMflXtlgoj ++ rm /tmp/tmp.pIdvWBFM79 /tmp/tmp.rMflXtlgoj ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pqLDNl6DfS +++ mktemp ++ local LAST_ERR=/tmp/tmp.qn93EnOliT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pqLDNl6DfS ++ cat /tmp/tmp.qn93EnOliT ++ rm /tmp/tmp.pqLDNl6DfS /tmp/tmp.qn93EnOliT ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......... + sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z90fc9o07r +++ mktemp ++ local LAST_ERR=/tmp/tmp.C9JBQqNYAp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z90fc9o07r ++ cat /tmp/tmp.C9JBQqNYAp ++ rm /tmp/tmp.z90fc9o07r /tmp/tmp.C9JBQqNYAp ++ return 0 + local client_container=psmdb-client-66f577db5f-wqp2c + local mongo_flag= + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wqp2c -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yJpmMVOVU0 ++ mktemp + local LAST_ERR=/tmp/tmp.GvzVFwl6n4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-wqp2c -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yJpmMVOVU0 Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
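[editor's note] `run_mongo`, traced above, builds a `mongodb+srv` URI from the user, cluster service, and namespace, then pipes the statement into the `mongo` shell inside the client pod (looked up by its `name=psmdb-client` label). A trimmed sketch — the quoting here is simplified relative to the harness and will break if the statement itself contains single quotes:

```bash
# Sketch: run a mongo shell statement through the psmdb-client deployment.
run_mongo() {
    local command=$1   # mongo statement; \n inside it is expanded by printf
    local uri=$2       # user:pass@service.namespace
    local pod
    pod=$(kubectl get pods --selector=name=psmdb-client \
          -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$pod" -- bash -c \
        "printf '$command\n' | mongo \"mongodb+srv://$uri.svc.cluster.local/admin?ssl=false&replicaSet=rs0\""
}

run_mongo 'db.getUsers()' 'userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403'
```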
mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b8ce68a1-536c-4a7f-a09d-53dfa29fe609") } Percona Server for MongoDB server version: v7.0.5-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.GvzVFwl6n4 + rm /tmp/tmp.yJpmMVOVU0 /tmp/tmp.GvzVFwl6n4 + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-7403 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rfq70dcXvX +++ mktemp ++ local LAST_ERR=/tmp/tmp.WMJxLUC8h3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rfq70dcXvX ++ cat /tmp/tmp.WMJxLUC8h3 ++ rm /tmp/tmp.rfq70dcXvX /tmp/tmp.WMJxLUC8h3 ++ return 0 + local client_container=psmdb-client-66f577db5f-wqp2c + local mongo_flag= + [[ myApp:myPass@minimal-cluster-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-wqp2c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.SD7WKtAPBP ++ mktemp + local LAST_ERR=/tmp/tmp.pHpTyuEoFf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-wqp2c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SD7WKtAPBP Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("44fd9b4f-0642-4653-96b9-245bbc07950e") } Percona Server for MongoDB server version: v7.0.5-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.pHpTyuEoFf + rm /tmp/tmp.SD7WKtAPBP /tmp/tmp.pHpTyuEoFf + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + jq 'del(."grpc.start_time")' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.request.content".msg.kubeVersion)' + grep -Eo '\{.*\}' + grep -E 'server request 
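[editor's note] The smoke write above inserts a single marker document and expects `WriteResult({ "nInserted" : 1 })`. The trace only shows the insert; as an illustration, a read-back using the hypothetical `run_mongo` helper sketched earlier could look like:

```bash
# Sketch: insert a marker document, then read it back to confirm the write landed.
run_mongo 'use myApp\n db.test.insert({ x: 100500 })' \
          'myApp:myPass@minimal-cluster-rs0.version-service-7403'
run_mongo 'use myApp\n db.test.find({ x: 100500 }).count()' \
          'myApp:myPass@minimal-cluster-rs0.version-service-7403'
```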
payload|unary call' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + kubectl_bin logs version-service-cr-567f5fd64b-57pzc -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WbmpQQgxUh ++ mktemp + local LAST_ERR=/tmp/tmp.gdGqFjzOIf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-cr-567f5fd64b-57pzc -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WbmpQQgxUh + cat /tmp/tmp.gdGqFjzOIf + rm /tmp/tmp.WbmpQQgxUh /tmp/tmp.gdGqFjzOIf + return 0 + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + grep -E 'server request payload|unary call' + jq 'del(."grpc.request.content".msg.customResourceUid)' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.kubeVersion)' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + kubectl_bin logs version-service-69d6d975b6-qt9b8 -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.EkCeih1MBl ++ mktemp + local LAST_ERR=/tmp/tmp.v7i3wkjTV1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-69d6d975b6-qt9b8 -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EkCeih1MBl + cat /tmp/tmp.v7i3wkjTV1 + rm /tmp/tmp.EkCeih1MBl /tmp/tmp.v7i3wkjTV1 + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' 7.0-recommended == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=7.0 + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' 7.0-recommended == 7.0-recommended -a disabled == disabled ']' + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json /dev/fd/63 ++ grep -f /tmp/tmp.2WaJ9ND5BX/disabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json + [[ -s /tmp/tmp.2WaJ9ND5BX/disabled_telemetry.version-service.log.json ]] + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' 7.0-recommended == disabled -a disabled == disabled ']' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.R7JcrRdJbH +++ mktemp ++ local LAST_ERR=/tmp/tmp.aVMSA5jYYE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 
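[editor's note] The telemetry check scrapes the version-service pod logs and normalizes each gRPC log entry before diffing it against a stored expectation: `grep -E` keeps only the request/response lines, `grep -Eo '\{.*\}'` extracts the embedded JSON object, and a chain of `jq del(...)` filters drops the fields that legitimately vary between runs (timestamps, latency, resource UID, cluster version). Roughly:

```bash
# Sketch: normalize version-service telemetry logs for a stable diff.
pod=$(kubectl get pods --selector=run=version-service-cr \
      -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator)

kubectl logs "$pod" -n psmdb-operator \
    | grep -E 'server request payload|unary call' \
    | grep -Eo '\{.*\}' \
    | jq 'del(."grpc.start_time")
          | del(."grpc.time_ms")
          | del(."grpc.request.content".msg.customResourceUid)
          | del(."grpc.request.content".msg.kubeVersion)' \
    > /tmp/telemetry.version-service-cr.log.json
```

With the volatile fields stripped, a plain `diff` (or `grep -f`, as the trace uses) against the checked-in expectation file is enough to assert whether telemetry was or was not sent.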
'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.R7JcrRdJbH ++ cat /tmp/tmp.aVMSA5jYYE ++ rm /tmp/tmp.R7JcrRdJbH /tmp/tmp.aVMSA5jYYE ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-scf6t ++ mktemp + local LAST_OUT=/tmp/tmp.RHpIQPZUW1 ++ mktemp + local LAST_ERR=/tmp/tmp.p3nIUGRVpE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-scf6t + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RHpIQPZUW1 pod "percona-server-mongodb-operator-dd9b64f87-scf6t" deleted + cat /tmp/tmp.p3nIUGRVpE + rm /tmp/tmp.RHpIQPZUW1 /tmp/tmp.p3nIUGRVpE + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.74wuHsXPzu ++ mktemp + local LAST_ERR=/tmp/tmp.tlIE65s8Wx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.74wuHsXPzu perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.tlIE65s8Wx + rm /tmp/tmp.74wuHsXPzu /tmp/tmp.tlIE65s8Wx + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.ml8NLwC521 ++ mktemp + local LAST_ERR=/tmp/tmp.6xSshNPfNL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ml8NLwC521 perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted + cat /tmp/tmp.6xSshNPfNL + rm /tmp/tmp.ml8NLwC521 /tmp/tmp.6xSshNPfNL + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.vYC63RUTJt ++ mktemp + local LAST_ERR=/tmp/tmp.q5CP5ejarj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vYC63RUTJt deployment.apps "psmdb-client" deleted + cat /tmp/tmp.q5CP5ejarj + rm /tmp/tmp.vYC63RUTJt /tmp/tmp.q5CP5ejarj + return 0 + sleep 30 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.kxp4TOAMeA + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.Tdiwu0IEHv ++ mktemp + local LAST_ERR=/tmp/tmp.xDRhWjCzml + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.bAGpWbOtTc + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + for i in '$(seq 0 2)' + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kxp4TOAMeA + cat /tmp/tmp.xDRhWjCzml + rm /tmp/tmp.kxp4TOAMeA /tmp/tmp.xDRhWjCzml + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tdiwu0IEHv deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.bAGpWbOtTc + rm /tmp/tmp.Tdiwu0IEHv /tmp/tmp.bAGpWbOtTc + return 0 + wait_deployment 
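[editor's note] To flip telemetry off on the operator side, the harness round-trips the Deployment through `yq`, rewriting the `DISABLE_TELEMETRY` env var in place, and re-applies it. The same edit as a standalone pipeline:

```bash
# Sketch: set DISABLE_TELEMETRY=true on the operator Deployment.
kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator \
    | yq eval '(.spec.template.spec.containers[0].env[]
                | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
    | kubectl apply -n psmdb-operator -f -
```

`kubectl set env deployment/percona-server-mongodb-operator DISABLE_TELEMETRY=true -n psmdb-operator` would likely achieve the same in one step; the get/yq/apply round-trip simply keeps the edit explicit and scriptable.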
percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.vulaKqS4kp ++ mktemp + local LAST_ERR=/tmp/tmp.4aEU2umEHo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vulaKqS4kp + cat /tmp/tmp.4aEU2umEHo + rm /tmp/tmp.vulaKqS4kp /tmp/tmp.4aEU2umEHo + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.CY60x8B1cO +++ mktemp ++ local LAST_ERR=/tmp/tmp.RWkGBPukVX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CY60x8B1cO ++ cat /tmp/tmp.RWkGBPukVX ++ rm /tmp/tmp.CY60x8B1cO /tmp/tmp.RWkGBPukVX ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.0xpcqithAE +++ mktemp ++ local LAST_ERR=/tmp/tmp.z7G2KvE96b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0xpcqithAE ++ cat /tmp/tmp.z7G2KvE96b ++ rm /tmp/tmp.0xpcqithAE /tmp/tmp.z7G2KvE96b ++ return 0 + '[' 1 == 1 ']' + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.c7rlXPLqln ++ mktemp + local LAST_ERR=/tmp/tmp.0287nkelsl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.c7rlXPLqln pod "version-service-cr-567f5fd64b-57pzc" deleted + cat /tmp/tmp.0287nkelsl + rm /tmp/tmp.c7rlXPLqln /tmp/tmp.0287nkelsl + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.avmxGRsBna ++ mktemp + local LAST_ERR=/tmp/tmp.MgW9gT2JW1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.avmxGRsBna pod "version-service-69d6d975b6-qt9b8" deleted + cat /tmp/tmp.MgW9gT2JW1 + rm /tmp/tmp.avmxGRsBna /tmp/tmp.MgW9gT2JW1 + return 0 + check_telemetry_transfer http://version-service-cr:11000 disabled disabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=disabled + local telemetry_state=disabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BovEdCdGFJ ++ mktemp + 
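[editor's note] Between scenarios the harness clears the version-service logs by deleting the pods by label; their Deployments recreate them with empty logs, so the next telemetry check starts from a clean slate:

```bash
# Sketch: recycle version-service pods so the next scenario reads fresh logs.
kubectl delete pod -l run=version-service-cr -n psmdb-operator
kubectl delete pod -l run=version-service -n psmdb-operator
```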
local LAST_ERR=/tmp/tmp.9x69tnTjmG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BovEdCdGFJ deployment.apps/psmdb-client created + cat /tmp/tmp.9x69tnTjmG + rm /tmp/tmp.BovEdCdGFJ /tmp/tmp.9x69tnTjmG + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ZS2kHTDhut ++ mktemp + local LAST_ERR=/tmp/tmp.9TKPYbJmEJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZS2kHTDhut secret/minimal-cluster configured + cat /tmp/tmp.9TKPYbJmEJ Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.ZS2kHTDhut /tmp/tmp.9TKPYbJmEJ + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "disabled" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | .spec.backup.enabled = false | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cr-minimal.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.YcRuMXWtCE ++ mktemp + local LAST_ERR=/tmp/tmp.ggREWFAmuZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YcRuMXWtCE perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.ggREWFAmuZ + rm /tmp/tmp.YcRuMXWtCE /tmp/tmp.ggREWFAmuZ + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uUsw0xOzqI +++ mktemp ++ local LAST_ERR=/tmp/tmp.zpl4P3iSqI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ 
set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uUsw0xOzqI ++ cat /tmp/tmp.zpl4P3iSqI ++ rm /tmp/tmp.uUsw0xOzqI /tmp/tmp.zpl4P3iSqI ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready............OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wMfjUyutqz +++ mktemp ++ local LAST_ERR=/tmp/tmp.m9MEEzJm2v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wMfjUyutqz ++ cat /tmp/tmp.m9MEEzJm2v ++ rm /tmp/tmp.wMfjUyutqz /tmp/tmp.m9MEEzJm2v ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GFvtV5Scoe +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nfdv3mA2QM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GFvtV5Scoe ++ cat /tmp/tmp.Nfdv3mA2QM ++ rm /tmp/tmp.GFvtV5Scoe /tmp/tmp.Nfdv3mA2QM ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............... + sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Dgl35wNWaX +++ mktemp ++ local LAST_ERR=/tmp/tmp.al5qhXMT1m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Dgl35wNWaX ++ cat /tmp/tmp.al5qhXMT1m ++ rm /tmp/tmp.Dgl35wNWaX /tmp/tmp.al5qhXMT1m ++ return 0 + local client_container=psmdb-client-66f577db5f-9lngr + local mongo_flag= + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-9lngr -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QRYKF0aVnl ++ mktemp + local LAST_ERR=/tmp/tmp.1xHv4qkC4P + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-9lngr -- bash -c 'printf 
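[editor's note] `wait_for_running` decides which extra pods to wait for by querying the CR with filtered JSONPath expressions — whether the `rs0` replset has arbiter, non-voting, or hidden members enabled. The empty results above (compared against the escaped pattern `\t\r\u\e`) mean none are configured in the minimal cluster. The query pattern in isolation:

```bash
# Sketch: read a per-replset flag from the psmdb custom resource.
arbiter_enabled=$(kubectl get psmdb minimal-cluster \
    -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}')
if [[ "$arbiter_enabled" == "true" ]]; then
    echo "also wait for the arbiter pod"
fi
```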
'\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QRYKF0aVnl Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("69be4ecd-a065-4f7e-af18-cb317383055e") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.1xHv4qkC4P + rm /tmp/tmp.QRYKF0aVnl /tmp/tmp.1xHv4qkC4P + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-7403 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yzif5kkXe6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NdGPlGBP98 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yzif5kkXe6 ++ cat /tmp/tmp.NdGPlGBP98 ++ rm /tmp/tmp.yzif5kkXe6 /tmp/tmp.NdGPlGBP98 ++ return 0 + local client_container=psmdb-client-66f577db5f-9lngr + local mongo_flag= + [[ myApp:myPass@minimal-cluster-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-9lngr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.8HWfRQCq3j ++ mktemp + local LAST_ERR=/tmp/tmp.kBpzVyET8q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-9lngr -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8HWfRQCq3j Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2554bc73-9b7b-4813-83c3-a4d53b244073") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.kBpzVyET8q + rm /tmp/tmp.8HWfRQCq3j /tmp/tmp.kBpzVyET8q + return 0 + desc 'check telemetry' + set +o xtrace 
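[editor's note] Note how the reported server version differs between scenarios: with the `7.0-recommended` channel the version service steered the cluster to v7.0.5-3, while with `apply: disabled` the cluster stays on the build from `.spec.image` (v7.0.18-11 here, matching ACTUAL_MONGOD_VERSION). A quick way to confirm the effective image per pod, assuming the usual operator labels:

```bash
# Sketch: list the mongod image actually running in each replset pod.
kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb \
    -o 'jsonpath={range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
```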
----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + jq 'del(."grpc.time_ms")' + jq 'del(."grpc.start_time")' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + kubectl_bin logs version-service-cr-567f5fd64b-fslwl -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.6F0jcjGsSc ++ mktemp + local LAST_ERR=/tmp/tmp.uXqwglyyuG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-cr-567f5fd64b-fslwl -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6F0jcjGsSc + cat /tmp/tmp.uXqwglyyuG + rm /tmp/tmp.6F0jcjGsSc /tmp/tmp.uXqwglyyuG + return 0 + jq 'del(."grpc.start_time")' + jq 'del(."grpc.request.content".msg.kubeVersion)' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.time_ms")' + grep -E 'server request payload|unary call' + jq 'del(."grpc.request.content".msg.customResourceUid)' + kubectl_bin logs version-service-69d6d975b6-jvfl4 -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.f0H0wKWV7x ++ mktemp + local LAST_ERR=/tmp/tmp.FL6S9HAo6e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs version-service-69d6d975b6-jvfl4 -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.f0H0wKWV7x + cat /tmp/tmp.FL6S9HAo6e + rm /tmp/tmp.f0H0wKWV7x /tmp/tmp.FL6S9HAo6e + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=disabled + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' disabled == disabled-recommended -a disabled == disabled ']' + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == disabled ']' + [[ -s /tmp/tmp.2WaJ9ND5BX/disabled_telemetry.version-service-cr.log.json ]] + [[ -s /tmp/tmp.2WaJ9ND5BX/disabled_telemetry.version-service.log.json ]] ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.QktmTH6lzu +++ mktemp ++ local LAST_ERR=/tmp/tmp.b4aarszV9J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in 
'$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QktmTH6lzu ++ cat /tmp/tmp.b4aarszV9J ++ rm /tmp/tmp.QktmTH6lzu /tmp/tmp.b4aarszV9J ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-zmwpt ++ mktemp + local LAST_OUT=/tmp/tmp.8ysupVmlJj ++ mktemp + local LAST_ERR=/tmp/tmp.f4Acu7Mh8c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-zmwpt + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8ysupVmlJj pod "percona-server-mongodb-operator-dd9b64f87-zmwpt" deleted + cat /tmp/tmp.f4Acu7Mh8c + rm /tmp/tmp.8ysupVmlJj /tmp/tmp.f4Acu7Mh8c + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.RPvk9oSnHe ++ mktemp + local LAST_ERR=/tmp/tmp.UfOMnhqkvX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RPvk9oSnHe perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.UfOMnhqkvX + rm /tmp/tmp.RPvk9oSnHe /tmp/tmp.UfOMnhqkvX + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.UeGdAD01OB ++ mktemp + local LAST_ERR=/tmp/tmp.HjwQQjDGcp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UeGdAD01OB perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted + cat /tmp/tmp.HjwQQjDGcp + rm /tmp/tmp.UeGdAD01OB /tmp/tmp.HjwQQjDGcp + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.ijg5MIgrxx ++ mktemp + local LAST_ERR=/tmp/tmp.fNJgGoazov + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ijg5MIgrxx deployment.apps "psmdb-client" deleted + cat /tmp/tmp.fNJgGoazov + rm /tmp/tmp.ijg5MIgrxx /tmp/tmp.fNJgGoazov + return 0 + sleep 30 + cases=("version-service-exact" "version-service-recommended" "version-service-latest" "version-service-major" "version-service-unreachable") + expected_images=("percona/percona-server-mongodb:6.0.3-2" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:6.0.4-3" "$IMAGE_MONGOD") + for i in '"${!cases[@]}"' + desc 'test version-service-exact' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-exact ----------------------------------------------------------------------------------- + cluster=version-service-exact + expected_image=percona/percona-server-mongodb:6.0.3-2 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.MrXWEbJLTP ++ mktemp + local LAST_ERR=/tmp/tmp.B2wa5qqZp9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + 
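[editor's note] The per-channel scenarios are driven by two parallel bash arrays, assigned just above: a case name (which doubles as the cluster name and selects the `version-service-<case>-rs0.yml` manifest) and the image each channel is expected to resolve to. In outline:

```bash
# Sketch: the case matrix pairing each upgrade channel with its expected image.
cases=("version-service-exact" "version-service-recommended" "version-service-latest"
       "version-service-major" "version-service-unreachable")
expected_images=("percona/percona-server-mongodb:6.0.3-2"
                 "percona/percona-server-mongodb:8.0.4-1-multi"
                 "percona/percona-server-mongodb:8.0.4-1-multi"
                 "percona/percona-server-mongodb:6.0.4-3"
                 "$IMAGE_MONGOD")   # unreachable service: keep the configured image

for i in "${!cases[@]}"; do
    echo "case=${cases[$i]} expects ${expected_images[$i]}"
done
```

The unreachable case is the interesting one: when the version service cannot be contacted, the operator must fall back to the image already set in the CR rather than failing the cluster.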
for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MrXWEbJLTP secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.B2wa5qqZp9 + rm /tmp/tmp.MrXWEbJLTP /tmp/tmp.B2wa5qqZp9 + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.kMD4mKG5wF + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/version-service-exact-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.kMD4mKG5wF ++ mktemp + local LAST_OUT=/tmp/tmp.dUZjoOXur0 ++ mktemp + local LAST_ERR=/tmp/tmp.cU9MMK1lQX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dUZjoOXur0 perconaservermongodb.psmdb.percona.com/version-service-exact created + cat /tmp/tmp.cU9MMK1lQX + rm /tmp/tmp.dUZjoOXur0 /tmp/tmp.cU9MMK1lQX + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-exact-rs0 3 + local name=version-service-exact-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-exact ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod version-service-exact-rs0-0 + local pod=version-service-exact-rs0-0 + set +o xtrace waiting for pod/version-service-exact-rs0-0 to be ready...............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod version-service-exact-rs0-1 + local pod=version-service-exact-rs0-1 + set +o xtrace waiting for pod/version-service-exact-rs0-1 to be ready..................OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RBGJwMVtxv +++ mktemp ++ local LAST_ERR=/tmp/tmp.kOSDOc9MzX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RBGJwMVtxv ++ cat /tmp/tmp.kOSDOc9MzX ++ rm /tmp/tmp.RBGJwMVtxv /tmp/tmp.kOSDOc9MzX ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-exact-rs0-2 + local 
pod=version-service-exact-rs0-2 + set +o xtrace waiting for pod/version-service-exact-rs0-2 to be ready................OK ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZIAtjNcRxY +++ mktemp ++ local LAST_ERR=/tmp/tmp.d2RXtqZXAh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZIAtjNcRxY ++ cat /tmp/tmp.d2RXtqZXAh ++ rm /tmp/tmp.ZIAtjNcRxY /tmp/tmp.d2RXtqZXAh ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jafM3aDnhh +++ mktemp ++ local LAST_ERR=/tmp/tmp.BtXLQvBkG9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jafM3aDnhh ++ cat /tmp/tmp.BtXLQvBkG9 ++ rm /tmp/tmp.jafM3aDnhh /tmp/tmp.BtXLQvBkG9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.MtXrjYqW1X ++ mktemp + local LAST_ERR=/tmp/tmp.lxkXGXZzAP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MtXrjYqW1X + cat /tmp/tmp.lxkXGXZzAP + rm /tmp/tmp.MtXrjYqW1X /tmp/tmp.lxkXGXZzAP + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7403 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rFEqdwCOAw +++ mktemp ++ local LAST_ERR=/tmp/tmp.TCOuSYZ8Ri ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rFEqdwCOAw ++ cat /tmp/tmp.TCOuSYZ8Ri ++ rm /tmp/tmp.rFEqdwCOAw /tmp/tmp.TCOuSYZ8Ri ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ 
userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.sxNv5qzQ8V ++ mktemp + local LAST_ERR=/tmp/tmp.5B46I3IwYh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sxNv5qzQ8V Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-0.version-service-exact-rs0.version-service-7403.svc.cluster.local:27017,version-service-exact-rs0-1.version-service-exact-rs0.version-service-7403.svc.cluster.local:27017,version-service-exact-rs0-2.version-service-exact-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b89d860e-102c-4786-a146-964a781e51cd") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.5B46I3IwYh + rm /tmp/tmp.sxNv5qzQ8V /tmp/tmp.5B46I3IwYh + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-exact-rs0.version-service-7403 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-exact-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rp83vy0Fo7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.IQ5Fuwj0uL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Rp83vy0Fo7 ++ cat /tmp/tmp.IQ5Fuwj0uL ++ rm /tmp/tmp.Rp83vy0Fo7 /tmp/tmp.IQ5Fuwj0uL ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ myApp:myPass@version-service-exact-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.rPpoQqTxMU ++ mktemp + local LAST_ERR=/tmp/tmp.DEBE9Y6Cql + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec 
psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rPpoQqTxMU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-2.version-service-exact-rs0.version-service-7403.svc.cluster.local:27017,version-service-exact-rs0-0.version-service-exact-rs0.version-service-7403.svc.cluster.local:27017,version-service-exact-rs0-1.version-service-exact-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("1feb30f8-c2f6-47c6-bc13-08e7024ef681") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.DEBE9Y6Cql + rm /tmp/tmp.rPpoQqTxMU /tmp/tmp.DEBE9Y6Cql + return 0 + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.IigOqsYUfY ++ mktemp + local LAST_ERR=/tmp/tmp.I6UIwxwv9b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IigOqsYUfY + cat /tmp/tmp.I6UIwxwv9b + rm /tmp/tmp.IigOqsYUfY /tmp/tmp.I6UIwxwv9b + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-exact-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-exact-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-exact-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-exact-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.iDeWcDTBdI ++ mktemp + local LAST_ERR=/tmp/tmp.rXE10sgELz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iDeWcDTBdI perconaservermongodb.psmdb.percona.com "version-service-exact" deleted + cat /tmp/tmp.rXE10sgELz + rm /tmp/tmp.iDeWcDTBdI /tmp/tmp.rXE10sgELz + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.fEWU5ti0wp +++ mktemp ++ local LAST_ERR=/tmp/tmp.lB3DeJivAy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.fEWU5ti0wp ++ cat /tmp/tmp.lB3DeJivAy ++ rm /tmp/tmp.fEWU5ti0wp /tmp/tmp.lB3DeJivAy ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-k9zns pod "percona-server-mongodb-operator-dd9b64f87-k9zns" deleted + sleep 10 + for i in '"${!cases[@]}"' + desc 'test version-service-recommended' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-recommended ----------------------------------------------------------------------------------- + cluster=version-service-recommended + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.M0kl5M7PNR ++ mktemp + local LAST_ERR=/tmp/tmp.zh1RTMit8j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M0kl5M7PNR secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.zh1RTMit8j Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.M0kl5M7PNR /tmp/tmp.zh1RTMit8j + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.8mw0BNO44N + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/version-service-recommended-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.8mw0BNO44N + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.n8lH4ig2j6 ++ mktemp + local LAST_ERR=/tmp/tmp.28fEnvaEjg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n8lH4ig2j6 perconaservermongodb.psmdb.percona.com/version-service-recommended created + cat /tmp/tmp.28fEnvaEjg + rm /tmp/tmp.n8lH4ig2j6 /tmp/tmp.28fEnvaEjg + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-recommended-rs0 3 + local 
name=version-service-recommended-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-recommended ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod version-service-recommended-rs0-0 + local pod=version-service-recommended-rs0-0 + set +o xtrace waiting for pod/version-service-recommended-rs0-0 to be ready.................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod version-service-recommended-rs0-1 + local pod=version-service-recommended-rs0-1 + set +o xtrace waiting for pod/version-service-recommended-rs0-1 to be ready................OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0Z6AMffnj7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ILEDDnWxWu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0Z6AMffnj7 ++ cat /tmp/tmp.ILEDDnWxWu ++ rm /tmp/tmp.0Z6AMffnj7 /tmp/tmp.ILEDDnWxWu ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-recommended-rs0-2 + local pod=version-service-recommended-rs0-2 + set +o xtrace waiting for pod/version-service-recommended-rs0-2 to be ready...............OK ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M5wrKn1frS +++ mktemp ++ local LAST_ERR=/tmp/tmp.VBUYYe62IY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M5wrKn1frS ++ cat /tmp/tmp.VBUYYe62IY ++ rm /tmp/tmp.M5wrKn1frS /tmp/tmp.VBUYYe62IY ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wpEkmNB18x +++ mktemp ++ local LAST_ERR=/tmp/tmp.P1vpmqsYLE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wpEkmNB18x ++ cat /tmp/tmp.P1vpmqsYLE ++ rm /tmp/tmp.wpEkmNB18x /tmp/tmp.P1vpmqsYLE ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local 
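-------------------------------------------------------------------------------------
wait_for_running above fans out into wait_pod for each replica-set member, printing one dot per poll until the pod reports ready. A sketch of that polling loop; the jsonpath readiness probe and the retry cap are assumptions inferred from the dots and the OK marker in the trace:
-------------------------------------------------------------------------------------

wait_pod() {
    local pod=$1 retry=0
    set +o xtrace                          # the dots would drown the trace otherwise
    echo -n "waiting for pod/${pod} to be ready"
    until [ "$(kubectl get pod/"$pod" \
            -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}')" = "True" ]; do
        echo -n .
        sleep 1
        retry=$((retry + 1))
        if [ "$retry" -ge 360 ]; then      # assumed cap; the real timeout may differ
            echo "pod/${pod} never became ready"
            exit 1
        fi
    done
    echo .OK
}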
new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.Qp23WOK5oQ ++ mktemp + local LAST_ERR=/tmp/tmp.FSNBo0JvL3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qp23WOK5oQ + cat /tmp/tmp.FSNBo0JvL3 + rm /tmp/tmp.Qp23WOK5oQ /tmp/tmp.FSNBo0JvL3 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7403 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aG1p6x7vY4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DjnitHHE86 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aG1p6x7vY4 ++ cat /tmp/tmp.DjnitHHE86 ++ rm /tmp/tmp.aG1p6x7vY4 /tmp/tmp.DjnitHHE86 ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EI0Rd9SlzI ++ mktemp + local LAST_ERR=/tmp/tmp.cBzwxqmY5K + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 
-a -n 1 ']' + break + cat /tmp/tmp.EI0Rd9SlzI Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-7403.svc.cluster.local:27017,version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-7403.svc.cluster.local:27017,version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b6b2d54d-0166-4799-aff9-214e37e92017") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.cBzwxqmY5K + rm /tmp/tmp.EI0Rd9SlzI /tmp/tmp.cBzwxqmY5K + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-recommended-rs0.version-service-7403 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-recommended-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9s7cT5en3V +++ mktemp ++ local LAST_ERR=/tmp/tmp.eiEvtEZ0Ol ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9s7cT5en3V ++ cat /tmp/tmp.eiEvtEZ0Ol ++ rm /tmp/tmp.9s7cT5en3V /tmp/tmp.eiEvtEZ0Ol ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ myApp:myPass@version-service-recommended-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.hHrF5eyRzw ++ mktemp + local LAST_ERR=/tmp/tmp.PJrd9cIWyD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hHrF5eyRzw Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-7403.svc.cluster.local:27017,version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-7403.svc.cluster.local:27017,version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("abd7c58a-1fc5-46ee-b277-20c2d705af23") } Percona Server 
for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.PJrd9cIWyD + rm /tmp/tmp.hHrF5eyRzw /tmp/tmp.PJrd9cIWyD + return 0 + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
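-------------------------------------------------------------------------------------
run_mongo wraps every database call: it finds the psmdb-client pod and pipes the command into a mongo shell running there, over a mongodb+srv URI so a single SRV record resolves all three replica-set members — which is why each connect line above lists every pod. A sketch under those assumptions; argument handling in the real helper is richer:
-------------------------------------------------------------------------------------

run_mongo() {
    local command=$1
    local uri=$2                           # user:pass@service.namespace
    local driver=${3:-mongodb+srv}
    local suffix=${4:-.svc.cluster.local}
    local client
    client=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    # the command becomes the printf format string, so embedded \n sequences expand;
    # single-quoting the URI on the remote side keeps & from backgrounding the shell
    kubectl exec "$client" -- bash -c \
        "printf '$command\n' | mongo '$driver://$uri$suffix/admin?ssl=false&replicaSet=rs0'"
}

# e.g.: run_mongo 'use myApp\n db.test.insert({ x: 100500 })' "myApp:myPass@${cluster}-rs0.${namespace}"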
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.JyS8o2LH80 ++ mktemp + local LAST_ERR=/tmp/tmp.WxvGjlELkG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JyS8o2LH80 + cat /tmp/tmp.WxvGjlELkG + rm /tmp/tmp.JyS8o2LH80 /tmp/tmp.WxvGjlELkG + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-recommended-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-recommended-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-recommended-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-recommended-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.DzqboZteXv ++ mktemp + local LAST_ERR=/tmp/tmp.ActlR1FqKe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DzqboZteXv perconaservermongodb.psmdb.percona.com "version-service-recommended" deleted + cat /tmp/tmp.ActlR1FqKe + rm /tmp/tmp.DzqboZteXv /tmp/tmp.ActlR1FqKe + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.gk4eRmSmz0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.tgrEh81nv2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gk4eRmSmz0 ++ cat /tmp/tmp.tgrEh81nv2 ++ rm /tmp/tmp.gk4eRmSmz0 /tmp/tmp.tgrEh81nv2 ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-5rr99 pod 
"percona-server-mongodb-operator-dd9b64f87-5rr99" deleted + sleep 10 + for i in '"${!cases[@]}"' + desc 'test version-service-latest' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-latest ----------------------------------------------------------------------------------- + cluster=version-service-latest + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.zHYvGXEY2F ++ mktemp + local LAST_ERR=/tmp/tmp.SiywQ330ZJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zHYvGXEY2F secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.SiywQ330ZJ Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.zHYvGXEY2F /tmp/tmp.SiywQ330ZJ + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.DBGXyIhIRQ + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/version-service-latest-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.DBGXyIhIRQ + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.bISVwtGhb7 ++ mktemp + local LAST_ERR=/tmp/tmp.HFUac4vOxI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bISVwtGhb7 perconaservermongodb.psmdb.percona.com/version-service-latest created + cat /tmp/tmp.HFUac4vOxI + rm /tmp/tmp.bISVwtGhb7 /tmp/tmp.HFUac4vOxI + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-latest-rs0 3 + local name=version-service-latest-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-latest ++ seq 0 2 + for i in '$(seq 0 
$last_pod)' + [[ 0 -eq 2 ]] + wait_pod version-service-latest-rs0-0 + local pod=version-service-latest-rs0-0 + set +o xtrace waiting for pod/version-service-latest-rs0-0 to be ready............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod version-service-latest-rs0-1 + local pod=version-service-latest-rs0-1 + set +o xtrace waiting for pod/version-service-latest-rs0-1 to be ready............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HUTTe7tPoe +++ mktemp ++ local LAST_ERR=/tmp/tmp.iywMw6wu2t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HUTTe7tPoe ++ cat /tmp/tmp.iywMw6wu2t ++ rm /tmp/tmp.HUTTe7tPoe /tmp/tmp.iywMw6wu2t ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-latest-rs0-2 + local pod=version-service-latest-rs0-2 + set +o xtrace waiting for pod/version-service-latest-rs0-2 to be ready............OK ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NLQbBlzknD +++ mktemp ++ local LAST_ERR=/tmp/tmp.nb5WEajeZa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NLQbBlzknD ++ cat /tmp/tmp.nb5WEajeZa ++ rm /tmp/tmp.NLQbBlzknD /tmp/tmp.nb5WEajeZa ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o24QIMWJWE +++ mktemp ++ local LAST_ERR=/tmp/tmp.W7AhWkbboh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o24QIMWJWE ++ cat /tmp/tmp.W7AhWkbboh ++ rm /tmp/tmp.o24QIMWJWE /tmp/tmp.W7AhWkbboh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. 
| select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.r1xNNGbT9v ++ mktemp + local LAST_ERR=/tmp/tmp.V9Z1hG2Qku + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r1xNNGbT9v + cat /tmp/tmp.V9Z1hG2Qku + rm /tmp/tmp.r1xNNGbT9v /tmp/tmp.V9Z1hG2Qku + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7403 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oIJlYO363Z +++ mktemp ++ local LAST_ERR=/tmp/tmp.6nl8KEbum5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oIJlYO363Z ++ cat /tmp/tmp.6nl8KEbum5 ++ rm /tmp/tmp.oIJlYO363Z /tmp/tmp.6nl8KEbum5 ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Irl0ptW7XA ++ mktemp + local LAST_ERR=/tmp/tmp.LaFAQBZk7D + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e 
+ '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Irl0ptW7XA Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-2.version-service-latest-rs0.version-service-7403.svc.cluster.local:27017,version-service-latest-rs0-0.version-service-latest-rs0.version-service-7403.svc.cluster.local:27017,version-service-latest-rs0-1.version-service-latest-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("52ef9b1d-e2d7-4be5-974c-10717e7ef2d6") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.LaFAQBZk7D + rm /tmp/tmp.Irl0ptW7XA /tmp/tmp.LaFAQBZk7D + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-latest-rs0.version-service-7403 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-latest-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SQKpLQMleG +++ mktemp ++ local LAST_ERR=/tmp/tmp.l0rJysFG1z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SQKpLQMleG ++ cat /tmp/tmp.l0rJysFG1z ++ rm /tmp/tmp.SQKpLQMleG /tmp/tmp.l0rJysFG1z ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ myApp:myPass@version-service-latest-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Bva2dwnDxq ++ mktemp + local LAST_ERR=/tmp/tmp.BLkerJxyAg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Bva2dwnDxq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-2.version-service-latest-rs0.version-service-7403.svc.cluster.local:27017,version-service-latest-rs0-0.version-service-latest-rs0.version-service-7403.svc.cluster.local:27017,version-service-latest-rs0-1.version-service-latest-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("3d692624-7244-4abf-901c-bf0b418c1683") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server 
versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.BLkerJxyAg + rm /tmp/tmp.Bva2dwnDxq /tmp/tmp.BLkerJxyAg + return 0 + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.ocX3Nv1QhO ++ mktemp + local LAST_ERR=/tmp/tmp.6dRMrWJgXx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ocX3Nv1QhO + cat /tmp/tmp.6dRMrWJgXx + rm /tmp/tmp.ocX3Nv1QhO /tmp/tmp.6dRMrWJgXx + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-latest-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-latest-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-latest-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-latest-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.kS5VffMzdf ++ mktemp + local LAST_ERR=/tmp/tmp.NMeBuLw8Zr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kS5VffMzdf perconaservermongodb.psmdb.percona.com "version-service-latest" deleted + cat /tmp/tmp.NMeBuLw8Zr + rm /tmp/tmp.kS5VffMzdf /tmp/tmp.NMeBuLw8Zr + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fy88o7tETp +++ mktemp ++ local LAST_ERR=/tmp/tmp.iAWTT8SYyH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fy88o7tETp ++ cat /tmp/tmp.iAWTT8SYyH ++ rm /tmp/tmp.Fy88o7tETp /tmp/tmp.iAWTT8SYyH ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-6rtb5 pod 
"percona-server-mongodb-operator-dd9b64f87-6rtb5" deleted + sleep 10 + for i in '"${!cases[@]}"' + desc 'test version-service-major' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-major ----------------------------------------------------------------------------------- + cluster=version-service-major + expected_image=percona/percona-server-mongodb:6.0.4-3 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ijvQesDSNn ++ mktemp + local LAST_ERR=/tmp/tmp.jDhtgo4JvS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ijvQesDSNn secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.jDhtgo4JvS Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.ijvQesDSNn /tmp/tmp.jDhtgo4JvS + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.KMOuPcbWEw + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/version-service-major-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.KMOuPcbWEw + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.XrZvpD8lJx ++ mktemp + local LAST_ERR=/tmp/tmp.rpGkkeCY5u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XrZvpD8lJx perconaservermongodb.psmdb.percona.com/version-service-major created + cat /tmp/tmp.rpGkkeCY5u + rm /tmp/tmp.XrZvpD8lJx /tmp/tmp.rpGkkeCY5u + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-major-rs0 3 + local name=version-service-major-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-major ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 
2 ]] + wait_pod version-service-major-rs0-0 + local pod=version-service-major-rs0-0 + set +o xtrace waiting for pod/version-service-major-rs0-0 to be ready.................OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod version-service-major-rs0-1 + local pod=version-service-major-rs0-1 + set +o xtrace waiting for pod/version-service-major-rs0-1 to be ready.................OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DdmK69fZcu +++ mktemp ++ local LAST_ERR=/tmp/tmp.2TYBSIQ1yQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DdmK69fZcu ++ cat /tmp/tmp.2TYBSIQ1yQ ++ rm /tmp/tmp.DdmK69fZcu /tmp/tmp.2TYBSIQ1yQ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-major-rs0-2 + local pod=version-service-major-rs0-2 + set +o xtrace waiting for pod/version-service-major-rs0-2 to be ready..................OK ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HsEQEA4g2P +++ mktemp ++ local LAST_ERR=/tmp/tmp.FvdJgu7tIX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HsEQEA4g2P ++ cat /tmp/tmp.FvdJgu7tIX ++ rm /tmp/tmp.HsEQEA4g2P /tmp/tmp.FvdJgu7tIX ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1CoVqh7XVJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.kpsiWjxYtz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1CoVqh7XVJ ++ cat /tmp/tmp.kpsiWjxYtz ++ rm /tmp/tmp.1CoVqh7XVJ /tmp/tmp.kpsiWjxYtz ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-major-rs0 + local resource=statefulset/version-service-major-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-major-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.AmAGiT1EKC ++ mktemp + local LAST_ERR=/tmp/tmp.YeFi5BfGb6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AmAGiT1EKC + cat /tmp/tmp.YeFi5BfGb6 + rm /tmp/tmp.AmAGiT1EKC /tmp/tmp.YeFi5BfGb6 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-major-rs0.version-service-7403 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-major-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NPAXkHDUN3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.cxAdgi5eo7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NPAXkHDUN3 ++ cat /tmp/tmp.cxAdgi5eo7 ++ rm /tmp/tmp.NPAXkHDUN3 /tmp/tmp.cxAdgi5eo7 ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-major-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JqkzuC7e9Z ++ mktemp + local LAST_ERR=/tmp/tmp.tDvyodJpNf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JqkzuC7e9Z Percona Server for MongoDB 
shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-1.version-service-major-rs0.version-service-7403.svc.cluster.local:27017,version-service-major-rs0-2.version-service-major-rs0.version-service-7403.svc.cluster.local:27017,version-service-major-rs0-0.version-service-major-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8b005eea-f8dc-4d91-a700-363de8796e0a") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.tDvyodJpNf + rm /tmp/tmp.JqkzuC7e9Z /tmp/tmp.tDvyodJpNf + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-major-rs0.version-service-7403 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-major-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4598ONUVvF +++ mktemp ++ local LAST_ERR=/tmp/tmp.XFvCjh8c06 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4598ONUVvF ++ cat /tmp/tmp.XFvCjh8c06 ++ rm /tmp/tmp.4598ONUVvF /tmp/tmp.XFvCjh8c06 ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ myApp:myPass@version-service-major-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.r5KiM4mgFo ++ mktemp + local LAST_ERR=/tmp/tmp.HuDpE1i02m + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r5KiM4mgFo Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-1.version-service-major-rs0.version-service-7403.svc.cluster.local:27017,version-service-major-rs0-2.version-service-major-rs0.version-service-7403.svc.cluster.local:27017,version-service-major-rs0-0.version-service-major-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("43483eb5-82ae-452a-b924-a28660d6dd8a") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.HuDpE1i02m + rm 
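-----------------------------------------------------------------------------------
how run_mongo builds the commands above
-----------------------------------------------------------------------------------
The run_mongo helper seen in this trace resolves the psmdb-client pod by label, then pipes a printf-built script into the mongo shell over an SRV URI. A minimal standalone sketch of the same pattern, using values from this run (any other database or command would slot in the same way):

    # find the client pod, then run a mongo script inside it
    client=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client" -- bash -c \
        "printf 'use myApp\ndb.test.find()\n' | mongo 'mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-7403.svc.cluster.local/admin?ssl=false&replicaSet=rs0'"
-----------------------------------------------------------------------------------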
/tmp/tmp.r5KiM4mgFo /tmp/tmp.HuDpE1i02m + return 0 + compare_kubectl statefulset/version-service-major-rs0 + local resource=statefulset/version-service-major-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-major-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.bs9zEGwPEV ++ mktemp + local LAST_ERR=/tmp/tmp.MKkMf3UM3v + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bs9zEGwPEV + cat /tmp/tmp.MKkMf3UM3v + rm /tmp/tmp.bs9zEGwPEV /tmp/tmp.MKkMf3UM3v + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-major-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-major-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-major-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-major-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.CyS5JMgGvR ++ mktemp + local LAST_ERR=/tmp/tmp.7TWSQd4u8n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CyS5JMgGvR perconaservermongodb.psmdb.percona.com "version-service-major" deleted + cat /tmp/tmp.7TWSQd4u8n + rm /tmp/tmp.CyS5JMgGvR /tmp/tmp.7TWSQd4u8n + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.6oDglA0rzU +++ mktemp ++ local LAST_ERR=/tmp/tmp.uYdxw6ddWG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6oDglA0rzU ++ cat /tmp/tmp.uYdxw6ddWG ++ rm /tmp/tmp.6oDglA0rzU /tmp/tmp.uYdxw6ddWG ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-m9dr6 pod "percona-server-mongodb-operator-dd9b64f87-m9dr6" deleted + sleep 10 + for i in '"${!cases[@]}"' + desc 'test version-service-unreachable' + set +o 
xtrace ----------------------------------------------------------------------------------- test version-service-unreachable ----------------------------------------------------------------------------------- + cluster=version-service-unreachable + expected_image=perconalab/percona-server-mongodb-operator:main-mongod7.0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.n2CNvZMJZf ++ mktemp + local LAST_ERR=/tmp/tmp.bzC88sBMVb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.n2CNvZMJZf secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.bzC88sBMVb Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.n2CNvZMJZf /tmp/tmp.bzC88sBMVb + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.5KjUNY7myt + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/conf/version-service-unreachable-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0" | .spec.pmm.image = "perconalab/pmm-client:dev-latest" | .spec.backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.5KjUNY7myt + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.VZm8bQfR8F ++ mktemp + local LAST_ERR=/tmp/tmp.C7NbC9vV6g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VZm8bQfR8F perconaservermongodb.psmdb.percona.com/version-service-unreachable created + cat /tmp/tmp.C7NbC9vV6g + rm /tmp/tmp.VZm8bQfR8F /tmp/tmp.C7NbC9vV6g + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-unreachable-rs0 3 + local name=version-service-unreachable-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-unreachable ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod version-service-unreachable-rs0-0 + local 
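-----------------------------------------------------------------------------------
how the cluster manifest is rewritten before apply
-----------------------------------------------------------------------------------
The cluster above is created by patching the CR manifest in a pipeline: sed substitutes the init-image placeholder, yq overrides the image fields, and the result is piped straight to kubectl apply. A minimal sketch of the same pattern (the input path here is hypothetical):

    sed 's%#initImage%perconalab/percona-server-mongodb-operator:PR-1855-bb3d32e4%g' cluster-cr.yml \
        | yq eval '
            .spec.backup.enabled = false |
            del(.spec.backup.tasks) |
            .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' - \
        | kubectl apply -f -
-----------------------------------------------------------------------------------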
pod=version-service-unreachable-rs0-0 + set +o xtrace waiting for pod/version-service-unreachable-rs0-0 to be ready............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod version-service-unreachable-rs0-1 + local pod=version-service-unreachable-rs0-1 + set +o xtrace waiting for pod/version-service-unreachable-rs0-1 to be ready.............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7CitqyYzgT +++ mktemp ++ local LAST_ERR=/tmp/tmp.29KzzFvNRH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7CitqyYzgT ++ cat /tmp/tmp.29KzzFvNRH ++ rm /tmp/tmp.7CitqyYzgT /tmp/tmp.29KzzFvNRH ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-unreachable-rs0-2 + local pod=version-service-unreachable-rs0-2 + set +o xtrace waiting for pod/version-service-unreachable-rs0-2 to be ready..............OK ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rvFfHCi4aC +++ mktemp ++ local LAST_ERR=/tmp/tmp.NFKUdW7XJq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rvFfHCi4aC ++ cat /tmp/tmp.NFKUdW7XJq ++ rm /tmp/tmp.rvFfHCi4aC /tmp/tmp.NFKUdW7XJq ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4zTB2wUTwJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.2941c3cH9N ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4zTB2wUTwJ ++ cat /tmp/tmp.2941c3cH9N ++ rm /tmp/tmp.4zTB2wUTwJ /tmp/tmp.2941c3cH9N ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) |
del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.HdakBJWjkB ++ mktemp + local LAST_ERR=/tmp/tmp.pV1ypdaJds + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HdakBJWjkB + cat /tmp/tmp.pV1ypdaJds + rm /tmp/tmp.HdakBJWjkB /tmp/tmp.pV1ypdaJds + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ echo '1.30 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7403 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xcvqefgoFY +++ mktemp ++ local LAST_ERR=/tmp/tmp.NnLczNKHho ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xcvqefgoFY ++ cat /tmp/tmp.NnLczNKHho ++ rm /tmp/tmp.xcvqefgoFY /tmp/tmp.NnLczNKHho ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.12tn64k04f ++ mktemp + local LAST_ERR=/tmp/tmp.CKQA5HlfvW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 
0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.12tn64k04f Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-7403.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-7403.svc.cluster.local:27017,version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4920c06e-9a03-43ca-9db9-c547323526a1") } Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.CKQA5HlfvW + rm /tmp/tmp.12tn64k04f /tmp/tmp.CKQA5HlfvW + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-unreachable-rs0.version-service-7403 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-unreachable-rs0.version-service-7403 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g8Jc6sctPq +++ mktemp ++ local LAST_ERR=/tmp/tmp.T9yn83I6d9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g8Jc6sctPq ++ cat /tmp/tmp.T9yn83I6d9 ++ rm /tmp/tmp.g8Jc6sctPq /tmp/tmp.T9yn83I6d9 ++ return 0 + local client_container=psmdb-client-66f577db5f-hfvhs + local mongo_flag= + [[ myApp:myPass@version-service-unreachable-rs0.version-service-7403 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.OEwrM09zIs ++ mktemp + local LAST_ERR=/tmp/tmp.jxDWZ2fnqK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-66f577db5f-hfvhs -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-7403.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OEwrM09zIs Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-7403.svc.cluster.local:27017,version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-7403.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-7403.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7338ad99-dcea-431e-8cba-829ef7e39445") } 
Percona Server for MongoDB server version: v7.0.18-11 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.jxDWZ2fnqK + rm /tmp/tmp.OEwrM09zIs /tmp/tmp.jxDWZ2fnqK + return 0 + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local new_result=/tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7403", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Wl3oNCRmzV ++ mktemp + local LAST_ERR=/tmp/tmp.QBxlZ7dWfM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Wl3oNCRmzV + cat /tmp/tmp.QBxlZ7dWfM + rm /tmp/tmp.Wl3oNCRmzV /tmp/tmp.QBxlZ7dWfM + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.30 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.2WaJ9ND5BX/statefulset_version-service-unreachable-rs0.yml + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-unreachable-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod7.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod7.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod7.0 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-unreachable-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod7.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod7.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod7.0 ']' + for pod in '"${pods[@]}"' ++ kubectl get pod/version-service-unreachable-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod7.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod7.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod7.0 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.Kvu3LEbgDP ++ mktemp + local LAST_ERR=/tmp/tmp.5NO61fryBl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Kvu3LEbgDP perconaservermongodb.psmdb.percona.com "version-service-unreachable" deleted + cat /tmp/tmp.5NO61fryBl + rm /tmp/tmp.Kvu3LEbgDP /tmp/tmp.5NO61fryBl + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.FCVeRwRdpe +++ mktemp ++ local LAST_ERR=/tmp/tmp.n44Ink7Oui ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FCVeRwRdpe ++ cat /tmp/tmp.n44Ink7Oui ++ rm /tmp/tmp.FCVeRwRdpe /tmp/tmp.n44Ink7Oui ++ return 
0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-dd9b64f87-qggd6 pod "percona-server-mongodb-operator-dd9b64f87-qggd6" deleted + sleep 10 + destroy version-service-7403 + local namespace=version-service-7403 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.He8KaPmLuZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.MaIQk7T5Zy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.He8KaPmLuZ ++ cat /tmp/tmp.MaIQk7T5Zy No resources found in version-service-7403 namespace. ++ rm /tmp/tmp.He8KaPmLuZ /tmp/tmp.MaIQk7T5Zy ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.e6s1XEHh61 ++ mktemp + local LAST_ERR=/tmp/tmp.OfNhgszN7f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e6s1XEHh61 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.OfNhgszN7f + rm /tmp/tmp.e6s1XEHh61 /tmp/tmp.OfNhgszN7f + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.7dTXCe1gJV ++ mktemp + local LAST_ERR=/tmp/tmp.VKsvdGzcfN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait 
--for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7dTXCe1gJV + cat /tmp/tmp.VKsvdGzcfN + rm /tmp/tmp.7dTXCe1gJV /tmp/tmp.VKsvdGzcfN + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.bVcs86OMJI ++ mktemp + local LAST_ERR=/tmp/tmp.GgpEv2fywh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bVcs86OMJI + cat /tmp/tmp.GgpEv2fywh + rm /tmp/tmp.bVcs86OMJI /tmp/tmp.GgpEv2fywh + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.5SZIbSvt4F ++ mktemp + local LAST_ERR=/tmp/tmp.iqYDZt3hAj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5SZIbSvt4F + cat /tmp/tmp.iqYDZt3hAj + rm /tmp/tmp.5SZIbSvt4F /tmp/tmp.iqYDZt3hAj + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.tpnFQPT3IQ ++ mktemp + local LAST_ERR=/tmp/tmp.Cej6ZPk84h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1855/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tpnFQPT3IQ clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.Cej6ZPk84h + rm /tmp/tmp.tpnFQPT3IQ /tmp/tmp.Cej6ZPk84h + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.eyanHWEwSU ++ mktemp + local LAST_ERR=/tmp/tmp.Np2CyO9ytA + local 
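-----------------------------------------------------------------------------------
why delete_crd patches finalizers first
-----------------------------------------------------------------------------------
A CRD cannot disappear while custom resources with pending finalizers still exist, so delete_crd first empties the finalizers list on every remaining object, then waits for the CRD itself to go. For a single CRD the sequence reduces to the following (the trailing ":" tolerates the "server doesn't have a resource type" errors seen above when the CRD is already gone):

    kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
        || :
    kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
-----------------------------------------------------------------------------------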
exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eyanHWEwSU + cat /tmp/tmp.Np2CyO9ytA Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eyanHWEwSU + cat /tmp/tmp.Np2CyO9ytA Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.eyanHWEwSU + cat /tmp/tmp.Np2CyO9ytA Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.eyanHWEwSU + cat /tmp/tmp.Np2CyO9ytA Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io 
"cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cert-manager-tokenrequest" not found Error from server (NotFound): 
+ rm /tmp/tmp.eyanHWEwSU /tmp/tmp.Np2CyO9ytA
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ rm -rf /tmp/tmp.2WaJ9ND5BX
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
+ kubectl_bin delete --grace-period=0 --force=true namespace version-service-7403
++ mktemp
+ local LAST_OUT=/tmp/tmp.Zze5xqB4uq
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.4d2VNZuxxR
+ desc 'test passed'
+ set +o xtrace
++ mktemp
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_ERR=/tmp/tmp.rjET60NHSU
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.ieKlS58Uzn
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in '$(seq 0 2)'
+ for i in '$(seq 0 2)'
+ set +e
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
+ kubectl delete --grace-period=0 --force=true namespace version-service-7403
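The interleaved trace lines above suggest the two kubectl_bin calls run concurrently (along with the 'test passed' banner), so the operator and test namespaces are torn down in parallel. --grace-period=0 --force=true skips graceful pod termination, which is acceptable for CI teardown but not something to reach for on a production cluster. Reduced to a sketch — an assumption about the framework's intent, with namespace names taken from this run:

    # Tear down both namespaces in parallel and wait for the API server
    # to accept the deletes. Forced deletion bypasses graceful shutdown.
    for ns in psmdb-operator version-service-7403; do
        kubectl delete --grace-period=0 --force=true namespace "$ns" &
    done
    wait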