Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/logs/version-service.log grep: warning: stray \ before - Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + create_infra version-service-7342 + local ns=version-service-7342 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.xIXEkXS8NY ++ mktemp + local LAST_ERR=/tmp/tmp.kP92VEoZjh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xIXEkXS8NY customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.kP92VEoZjh + rm /tmp/tmp.xIXEkXS8NY /tmp/tmp.kP92VEoZjh + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.5tlN7HM1Qo ++ mktemp + local LAST_ERR=/tmp/tmp.9ImLanS7N7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5tlN7HM1Qo + cat /tmp/tmp.9ImLanS7N7 + rm /tmp/tmp.5tlN7HM1Qo /tmp/tmp.9ImLanS7N7 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p 
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.5nUQulMP1Z ++ mktemp + local LAST_ERR=/tmp/tmp.zeldWTyeFd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5nUQulMP1Z + cat /tmp/tmp.zeldWTyeFd + rm /tmp/tmp.5nUQulMP1Z /tmp/tmp.zeldWTyeFd + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.v0YWUR7cM6 ++ mktemp + local LAST_ERR=/tmp/tmp.p2A2i24oKK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v0YWUR7cM6 + cat /tmp/tmp.p2A2i24oKK + rm /tmp/tmp.v0YWUR7cM6 /tmp/tmp.p2A2i24oKK + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.jgvghkXltw ++ mktemp + local LAST_ERR=/tmp/tmp.XK9CnRRXiY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jgvghkXltw clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.XK9CnRRXiY + rm /tmp/tmp.jgvghkXltw /tmp/tmp.XK9CnRRXiY + return 0 + check_crd_for_deletion PR-1961-5b0cc669 + local git_tag=PR-1961-5b0cc669 ++ yq eval .metadata.name ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1961-5b0cc669/deploy/crd.yaml ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zFdr9hKGDu +++ mktemp ++ local LAST_ERR=/tmp/tmp.eKdkDTPjTc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zFdr9hKGDu ++ cat /tmp/tmp.eKdkDTPjTc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl 
get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zFdr9hKGDu ++ cat /tmp/tmp.eKdkDTPjTc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zFdr9hKGDu ++ cat /tmp/tmp.eKdkDTPjTc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.zFdr9hKGDu ++ cat /tmp/tmp.eKdkDTPjTc Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.zFdr9hKGDu /tmp/tmp.eKdkDTPjTc ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces psmdb-operator' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.eg948BdeOl ++ mktemp + local LAST_OUT=/tmp/tmp.z7KOuICw7x ++ mktemp + local LAST_ERR=/tmp/tmp.8uLYDjEQWK + local 
exit_status=0 + local LAST_ERR=/tmp/tmp.XajweH2jac + local timeout=4 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eg948BdeOl + cat /tmp/tmp.8uLYDjEQWK + rm /tmp/tmp.eg948BdeOl /tmp/tmp.8uLYDjEQWK + return 0 namespace "version-service-22406" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z7KOuICw7x namespace "psmdb-operator" deleted + cat /tmp/tmp.XajweH2jac + rm /tmp/tmp.z7KOuICw7x /tmp/tmp.XajweH2jac + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.emk9yz8Noj ++ mktemp + local LAST_ERR=/tmp/tmp.IcMSKhcdE3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.emk9yz8Noj + cat /tmp/tmp.IcMSKhcdE3 + rm /tmp/tmp.emk9yz8Noj /tmp/tmp.IcMSKhcdE3 + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.wD0zumnY13 ++ mktemp + local LAST_ERR=/tmp/tmp.SX2XotEkXF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wD0zumnY13 namespace/psmdb-operator created + cat /tmp/tmp.SX2XotEkXF + rm /tmp/tmp.wD0zumnY13 /tmp/tmp.SX2XotEkXF + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.1cX7ooNFuU +++ mktemp ++ local LAST_ERR=/tmp/tmp.tFaymHJHPf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1cX7ooNFuU ++ cat /tmp/tmp.tFaymHJHPf ++ rm /tmp/tmp.1cX7ooNFuU /tmp/tmp.tFaymHJHPf ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster6 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ADqySQFcwt ++ mktemp + local LAST_ERR=/tmp/tmp.3o9WNAX3J5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster6 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ADqySQFcwt Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster6" modified. 
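-----------------------------------------------------------------------------------
note: the kubectl_bin retry wrapper expanded throughout this trace
-----------------------------------------------------------------------------------
Nearly every kubectl call above and below is the xtrace expansion of a kubectl_bin
helper: stdout and stderr are captured into mktemp files, the call is retried up to
three times with growing sleeps on failure (sleep 0, sleep 4, sleep 8 in the failed
crd/null lookups earlier), and both captures are printed before the temp files are
removed. The suite's real function is not part of this log, so the following is a
minimal sketch reconstructed from the trace, not the verbatim helper:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            # the trace's '[' <status> '!=' 0 -a -n 1 ']' guard: retry only on failure
            [ "$exit_status" = 0 ] && break
            sleep $((timeout * i))   # backs off 0s, 4s, 8s across the three attempts
        done
        cat "$LAST_OUT"        # replay captured stdout, as the trace does
        cat "$LAST_ERR" >&2    # replay captured stderr
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }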
+ cat /tmp/tmp.3o9WNAX3J5 + rm /tmp/tmp.ADqySQFcwt /tmp/tmp.3o9WNAX3J5 + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.5Bml4ECFBE ++ mktemp + local LAST_ERR=/tmp/tmp.kv8gmPTvas + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5Bml4ECFBE customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.kv8gmPTvas + rm /tmp/tmp.5Bml4ECFBE /tmp/tmp.kv8gmPTvas + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.STTbu8rj94 ++ mktemp + local LAST_ERR=/tmp/tmp.7PzFilRvIB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.STTbu8rj94 clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.7PzFilRvIB + rm /tmp/tmp.STTbu8rj94 /tmp/tmp.7PzFilRvIB + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.1SGszJNWcI ++ mktemp + local LAST_ERR=/tmp/tmp.LyD7DI7eY1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1SGszJNWcI deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.LyD7DI7eY1 + rm /tmp/tmp.1SGszJNWcI /tmp/tmp.LyD7DI7eY1 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.QCRjH1PtVn +++ mktemp ++ local LAST_ERR=/tmp/tmp.JLhHL2pugA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QCRjH1PtVn ++ cat /tmp/tmp.JLhHL2pugA ++ rm /tmp/tmp.QCRjH1PtVn /tmp/tmp.JLhHL2pugA ++ return 0 + wait_operator_pod percona-server-mongodb-operator-5464c7ccdb-jh87l + local pod=percona-server-mongodb-operator-5464c7ccdb-jh87l + set +o xtrace waiting for pod/percona-server-mongodb-operator-5464c7ccdb-jh87l to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Fjj5FaRR2Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.6v0n7KVkfL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Fjj5FaRR2Q ++ cat /tmp/tmp.6v0n7KVkfL ++ rm /tmp/tmp.Fjj5FaRR2Q /tmp/tmp.6v0n7KVkfL ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-5464c7ccdb-jh87l ++ mktemp + local LAST_OUT=/tmp/tmp.nlYrOVJFTS ++ mktemp + local LAST_ERR=/tmp/tmp.fdNFs7n4Kg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-5464c7ccdb-jh87l + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nlYrOVJFTS + cat /tmp/tmp.fdNFs7n4Kg + rm /tmp/tmp.nlYrOVJFTS /tmp/tmp.fdNFs7n4Kg + return 0 2025-12-04T21:58:25.547Z INFO setup Manager starting up {"gitCommit": "5b0cc6694bb3fee55fa4cc155ac44e1b0423e748", "gitBranch": "PR-1961-5b0cc669", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace version-service-7342 + local namespace=version-service-7342 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces version-service-7342' + set +o xtrace + xargs kubectl delete ns ----------------------------------------------------------------------------------- cleaned up old namespaces version-service-7342 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace version-service-7342 --ignore-not-found ++ mktemp ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_OUT=/tmp/tmp.d2zD9vOLiN + local LAST_OUT=/tmp/tmp.lChzWrZCp3 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.flkTb8FwXc + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.xDbXkM19W0 + local exit_status=0 + local timeout=4 + for i in $(seq 0 2) + set +e + kubectl get ns ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace version-service-7342 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d2zD9vOLiN + cat /tmp/tmp.flkTb8FwXc + rm /tmp/tmp.d2zD9vOLiN /tmp/tmp.flkTb8FwXc + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lChzWrZCp3 + cat /tmp/tmp.xDbXkM19W0 + rm /tmp/tmp.lChzWrZCp3 /tmp/tmp.xDbXkM19W0 + return 0 + kubectl_bin wait --for=delete namespace version-service-7342 ++ mktemp + local LAST_OUT=/tmp/tmp.1VqW4kKF8G ++ mktemp + local LAST_ERR=/tmp/tmp.GIN4XOsscb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace version-service-7342 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1VqW4kKF8G + cat /tmp/tmp.GIN4XOsscb + rm /tmp/tmp.1VqW4kKF8G /tmp/tmp.GIN4XOsscb + return 0 + desc 'create namespace version-service-7342' + set +o xtrace ----------------------------------------------------------------------------------- create namespace version-service-7342 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
version-service-7342 ++ mktemp + local LAST_OUT=/tmp/tmp.BVrwGM0oRS ++ mktemp + local LAST_ERR=/tmp/tmp.DjJOPrAtFT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace version-service-7342 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BVrwGM0oRS namespace/version-service-7342 created + cat /tmp/tmp.DjJOPrAtFT + rm /tmp/tmp.BVrwGM0oRS /tmp/tmp.DjJOPrAtFT + return 0 + set_kube_ctx version-service-7342 + local namespace=version-service-7342 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.UoFOBfwhqy +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qk7ZbRecsN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UoFOBfwhqy ++ cat /tmp/tmp.Qk7ZbRecsN ++ rm /tmp/tmp.UoFOBfwhqy /tmp/tmp.Qk7ZbRecsN ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster6 --namespace=version-service-7342 ++ mktemp + local LAST_OUT=/tmp/tmp.FpQg0wwOHy ++ mktemp + local LAST_ERR=/tmp/tmp.JNz1Pu0Ekw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster6 --namespace=version-service-7342 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FpQg0wwOHy Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1961-5b0cc669-2-cluster6" modified. + cat /tmp/tmp.JNz1Pu0Ekw + rm /tmp/tmp.FpQg0wwOHy /tmp/tmp.JNz1Pu0Ekw + return 0 + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + kubectl_bin create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.YIYzGId29R ++ mktemp + local LAST_ERR=/tmp/tmp.B0Ll1AWctD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YIYzGId29R configmap/versions created + cat /tmp/tmp.B0Ll1AWctD + rm /tmp/tmp.YIYzGId29R /tmp/tmp.B0Ll1AWctD + return 0 + kubectl_bin apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/vs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.3Jo3VmpdwL ++ mktemp + local LAST_ERR=/tmp/tmp.f5Oz6B4dtc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/vs.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3Jo3VmpdwL deployment.apps/version-service created service/version-service created + cat /tmp/tmp.f5Oz6B4dtc + rm 
/tmp/tmp.3Jo3VmpdwL /tmp/tmp.f5Oz6B4dtc + return 0 + sleep 10 + yq eval '(.. | select(tag == "!!str")) |= sub("version-service$", "version-service-cr")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/vs.yml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RXmmSSl1DJ ++ mktemp + local LAST_ERR=/tmp/tmp.W2uvHJnZer + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RXmmSSl1DJ deployment.apps/version-service-cr created service/version-service-cr created + cat /tmp/tmp.W2uvHJnZer + rm /tmp/tmp.RXmmSSl1DJ /tmp/tmp.W2uvHJnZer + return 0 + kubectl_bin -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000 ++ mktemp + local LAST_OUT=/tmp/tmp.xrdQdjIYZb ++ mktemp + local LAST_ERR=/tmp/tmp.4AFeq2mjsB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xrdQdjIYZb deployment.apps/percona-server-mongodb-operator env updated + cat /tmp/tmp.4AFeq2mjsB + rm /tmp/tmp.xrdQdjIYZb /tmp/tmp.4AFeq2mjsB + return 0 + sleep 30 + desc 'enable telemetry on operator level' + set +o xtrace ----------------------------------------------------------------------------------- enable telemetry on operator level ----------------------------------------------------------------------------------- + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "false"' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ohIJ7UKfJD + local LAST_OUT=/tmp/tmp.yQcmvD3nP6 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.jY0D6nQoJH + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.U4NV1Qp5JY + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ohIJ7UKfJD + cat /tmp/tmp.U4NV1Qp5JY + rm /tmp/tmp.ohIJ7UKfJD /tmp/tmp.U4NV1Qp5JY + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yQcmvD3nP6 deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.jY0D6nQoJH + rm /tmp/tmp.yQcmvD3nP6 /tmp/tmp.jY0D6nQoJH + return 0 + wait_deployment percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.vIb5UoYnem ++ mktemp + local LAST_ERR=/tmp/tmp.zXEesahPks + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vIb5UoYnem + cat /tmp/tmp.zXEesahPks + rm /tmp/tmp.vIb5UoYnem /tmp/tmp.zXEesahPks + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 
'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.61YEyWfE4H +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z1owsiQxK8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.61YEyWfE4H ++ cat /tmp/tmp.Z1owsiQxK8 ++ rm /tmp/tmp.61YEyWfE4H /tmp/tmp.Z1owsiQxK8 ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.1pfqo97SkX +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZtJa0IVkj2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1pfqo97SkX ++ cat /tmp/tmp.ZtJa0IVkj2 ++ rm /tmp/tmp.1pfqo97SkX /tmp/tmp.ZtJa0IVkj2 ++ return 0 + '[' 1 == 1 ']' + check_telemetry_transfer http://version-service-cr:11000 disabled enabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=disabled + local telemetry_state=enabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.QfYOfc491f ++ mktemp + local LAST_ERR=/tmp/tmp.fNmOBx9jAN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QfYOfc491f deployment.apps/psmdb-client created + cat /tmp/tmp.fNmOBx9jAN + rm /tmp/tmp.QfYOfc491f /tmp/tmp.fNmOBx9jAN + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.wZYiNSdz23 ++ mktemp + local LAST_ERR=/tmp/tmp.J2r1ITnulR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wZYiNSdz23 secret/minimal-cluster created + cat /tmp/tmp.J2r1ITnulR + rm /tmp/tmp.wZYiNSdz23 /tmp/tmp.J2r1ITnulR + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "disabled" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cr-minimal.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.WsVThWoGpE ++ mktemp + local LAST_ERR=/tmp/tmp.cak0ElvwYZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WsVThWoGpE perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.cak0ElvwYZ + rm /tmp/tmp.WsVThWoGpE /tmp/tmp.cak0ElvwYZ + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KKQPKwyxbC +++ mktemp ++ local LAST_ERR=/tmp/tmp.0YqJGqlkMJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KKQPKwyxbC ++ cat /tmp/tmp.0YqJGqlkMJ ++ rm /tmp/tmp.KKQPKwyxbC /tmp/tmp.0YqJGqlkMJ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready...............OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YBA7uAcc53 +++ mktemp ++ local LAST_ERR=/tmp/tmp.oxbEYOotEz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YBA7uAcc53 ++ cat /tmp/tmp.oxbEYOotEz ++ rm /tmp/tmp.YBA7uAcc53 /tmp/tmp.oxbEYOotEz ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nZYYDELg9s +++ mktemp ++ local LAST_ERR=/tmp/tmp.NHHtAslMck ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nZYYDELg9s ++ cat /tmp/tmp.NHHtAslMck ++ rm /tmp/tmp.nZYYDELg9s /tmp/tmp.NHHtAslMck ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................... 
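-----------------------------------------------------------------------------------
note: how the minimal-cluster CR above was produced
-----------------------------------------------------------------------------------
The perconaservermongodb "minimal-cluster" applied above is deploy/cr-minimal.yaml
piped through yq before kubectl apply. Pulled out of the wrapper noise, the pattern
is just the following (yq v4 syntax; the endpoint, channel, and image values are
the ones used by this particular run):

    yq eval '
        .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" |
        .spec.upgradeOptions.apply = "disabled" |
        .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669" |
        .spec.crVersion = "9.9.9" |
        .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" |
        .spec.backup.enabled = false' deploy/cr-minimal.yaml \
      | kubectl apply -f -

With upgradeOptions.apply set to "disabled" the operator applies no version-service
upgrade channel here; the second pass later in this log repeats the same flow with
apply = "8.0-recommended".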
+ sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7r8vof6HCE +++ mktemp ++ local LAST_ERR=/tmp/tmp.jGaMz21Fmr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7r8vof6HCE ++ cat /tmp/tmp.jGaMz21Fmr ++ rm /tmp/tmp.7r8vof6HCE /tmp/tmp.jGaMz21Fmr ++ return 0 + local client_container=psmdb-client-696897d69b-mflbw + local mongo_flag= + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-mflbw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.xT89fJ9xPT ++ mktemp + local LAST_ERR=/tmp/tmp.MvnrTLk9FG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-mflbw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xT89fJ9xPT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("587382bf-f80f-4262-ada9-84e527aeae9e") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.MvnrTLk9FG + rm /tmp/tmp.xT89fJ9xPT /tmp/tmp.MvnrTLk9FG + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cu3Zkjqcig 
+++ mktemp ++ local LAST_ERR=/tmp/tmp.fSvRnC8btm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cu3Zkjqcig ++ cat /tmp/tmp.fSvRnC8btm ++ rm /tmp/tmp.Cu3Zkjqcig /tmp/tmp.fSvRnC8btm ++ return 0 + local client_container=psmdb-client-696897d69b-mflbw + local mongo_flag= + [[ myApp:myPass@minimal-cluster-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-mflbw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.7PXsuo9H8f ++ mktemp + local LAST_ERR=/tmp/tmp.ddz1WuXFFF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-mflbw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7PXsuo9H8f Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7b06ad34-2a26-49b0-ac41-d4e004b4fd23") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.ddz1WuXFFF + rm /tmp/tmp.7PXsuo9H8f /tmp/tmp.ddz1WuXFFF + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-6fcc64c58f-jdvln -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Cclx5GxbQl ++ mktemp + local LAST_ERR=/tmp/tmp.sdoha25mAp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-6fcc64c58f-jdvln -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Cclx5GxbQl + cat /tmp/tmp.sdoha25mAp + rm /tmp/tmp.Cclx5GxbQl /tmp/tmp.sdoha25mAp + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-65bc6b945c-bfst8 -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.r6wjKSAxMS ++ mktemp + local LAST_ERR=/tmp/tmp.lQKHxf8jG5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs 
version-service-65bc6b945c-bfst8 -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r6wjKSAxMS + cat /tmp/tmp.lQKHxf8jG5 + rm /tmp/tmp.r6wjKSAxMS /tmp/tmp.lQKHxf8jG5 + return 0 + local telemetry_log_file=enabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a enabled == enabled ']' + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json /dev/fd/63 ++ grep -f /tmp/tmp.4XGlLxXGvd/enabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json + [[ -s /tmp/tmp.4XGlLxXGvd/enabled_telemetry.version-service-cr.log.json ]] + local telemetry_cr_log_file=enabled_telemetry.version-service-cr.log-cw.json + local image_prefix=disabled + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' disabled == disabled-recommended -a enabled == disabled ']' + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a enabled == disabled ']' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.q8YLAVqHcj +++ mktemp ++ local LAST_ERR=/tmp/tmp.0buTc7G7Xk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q8YLAVqHcj ++ cat /tmp/tmp.0buTc7G7Xk ++ rm /tmp/tmp.q8YLAVqHcj /tmp/tmp.0buTc7G7Xk ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-58fcd49c94-gd7r8 ++ mktemp + local LAST_OUT=/tmp/tmp.tm8Pflarag ++ mktemp + local LAST_ERR=/tmp/tmp.TfiYeUQ08W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-58fcd49c94-gd7r8 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tm8Pflarag pod "percona-server-mongodb-operator-58fcd49c94-gd7r8" deleted from psmdb-operator namespace + cat /tmp/tmp.TfiYeUQ08W + rm /tmp/tmp.tm8Pflarag /tmp/tmp.TfiYeUQ08W + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.sJkRQQ3Acg ++ mktemp + local LAST_ERR=/tmp/tmp.zLbFsCYsOB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + 
exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sJkRQQ3Acg perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.zLbFsCYsOB + rm /tmp/tmp.sJkRQQ3Acg /tmp/tmp.zLbFsCYsOB + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.uRzMR2gNh7 ++ mktemp + local LAST_ERR=/tmp/tmp.VUqRXl85jL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uRzMR2gNh7 perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-7342 namespace + cat /tmp/tmp.VUqRXl85jL + rm /tmp/tmp.uRzMR2gNh7 /tmp/tmp.VUqRXl85jL + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.0KOnOVP0A2 ++ mktemp + local LAST_ERR=/tmp/tmp.e9D7lT9G9X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0KOnOVP0A2 deployment.apps "psmdb-client" deleted from version-service-7342 namespace + cat /tmp/tmp.e9D7lT9G9X + rm /tmp/tmp.0KOnOVP0A2 /tmp/tmp.e9D7lT9G9X + return 0 + sleep 30 + desc 'disabling telemetry on the operator level' + set +o xtrace ----------------------------------------------------------------------------------- disabling telemetry on the operator level ----------------------------------------------------------------------------------- + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.iGIq4gjF58 ++ mktemp + local LAST_ERR=/tmp/tmp.zmhM03qJ3p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iGIq4gjF58 pod "version-service-cr-6fcc64c58f-jdvln" deleted from psmdb-operator namespace + cat /tmp/tmp.zmhM03qJ3p + rm /tmp/tmp.iGIq4gjF58 /tmp/tmp.zmhM03qJ3p + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.hLgwcuyDfX ++ mktemp + local LAST_ERR=/tmp/tmp.ICkRuemBtn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hLgwcuyDfX pod "version-service-65bc6b945c-bfst8" deleted from psmdb-operator namespace + cat /tmp/tmp.ICkRuemBtn + rm /tmp/tmp.hLgwcuyDfX /tmp/tmp.ICkRuemBtn + return 0 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.pjCW7K9G6v + local LAST_OUT=/tmp/tmp.0dn1DtLO5w ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.LCNzHOTRyv + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.E7BUbmJ0Ub + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pjCW7K9G6v + cat /tmp/tmp.LCNzHOTRyv + rm /tmp/tmp.pjCW7K9G6v /tmp/tmp.LCNzHOTRyv + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 
1 ']' + break + cat /tmp/tmp.0dn1DtLO5w deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.E7BUbmJ0Ub + rm /tmp/tmp.0dn1DtLO5w /tmp/tmp.E7BUbmJ0Ub + return 0 ++ get_mongod_ver_from_image perconalab/percona-server-mongodb-operator:main-mongod8.0 ++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ run_simple_cli_inside_image perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version' +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' +++ local image=perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ local 'cli=mongod --version' +++ local pod_name=6241 +++ kubectl_bin -n default run 6241 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JaPOABd5AV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.86G4L1Nzdm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 6241 --image=perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JaPOABd5AV +++ cat /tmp/tmp.86G4L1Nzdm +++ rm /tmp/tmp.JaPOABd5AV /tmp/tmp.86G4L1Nzdm +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/6241 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.efH7Ggb7WK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.heRz9QIL82 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/6241 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.efH7Ggb7WK +++ cat /tmp/tmp.heRz9QIL82 +++ rm /tmp/tmp.efH7Ggb7WK /tmp/tmp.heRz9QIL82 +++ return 0 ++++ kubectl_bin -n default exec 6241 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.E7jShXOD2U +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.k7C7ieLE72 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 6241 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.E7jShXOD2U ++++ cat /tmp/tmp.k7C7ieLE72 ++++ rm /tmp/tmp.E7jShXOD2U /tmp/tmp.k7C7ieLE72 ++++ return 0 +++ local 'output=db version v8.0.16-5 Build Info: { "version": "8.0.16-5", "gitVersion": "f174b291665d4de5c104d07bff06eabe7a913868", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc-google", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/6241 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.IWPteabgjf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZwBBSl8ZI4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/6241 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.IWPteabgjf +++ cat /tmp/tmp.ZwBBSl8ZI4 Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
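-----------------------------------------------------------------------------------
note: probing the mongod version of an image
-----------------------------------------------------------------------------------
get_mongod_ver_from_image above answers "which mongod does this image ship?" by
running a throwaway pod from the image and parsing `mongod --version`. Condensed,
with the same sed expression as the trace (the probe pod name here is illustrative;
the suite uses a bare random number such as 6241, and afterwards re-validates the
result against ^([0-9]+\.){2}[0-9]+-[0-9]+$):

    image=perconalab/percona-server-mongodb-operator:main-mongod8.0
    pod=ver-probe-$RANDOM
    kubectl -n default run "$pod" --image="$image" --restart=Never --command -- sleep infinity
    kubectl -n default wait --for=condition=Ready "pod/$pod"
    kubectl -n default exec "$pod" -- bash -c 'mongod --version 2>&1' |
        sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' |
        head -n1    # first output line is "db version v8.0.16-5", leaving "8.0.16-5"
    kubectl -n default delete "pod/$pod" --grace-period=0 --force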
+++ rm /tmp/tmp.IWPteabgjf /tmp/tmp.ZwBBSl8ZI4 +++ return 0 +++ echo db version v8.0.16-5 Build Info: '{' '"version":' '"8.0.16-5",' '"gitVersion":' '"f174b291665d4de5c104d07bff06eabe7a913868",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=8.0.16-5 ++ [[ ! 8.0.16-5 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 8.0.16-5 + ACTUAL_MONGOD_VERSION=8.0.16-5 + wait_deployment percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.yHCj1Wj9yw ++ mktemp + local LAST_ERR=/tmp/tmp.qCIJzpj0FL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yHCj1Wj9yw + cat /tmp/tmp.qCIJzpj0FL + rm /tmp/tmp.yHCj1Wj9yw /tmp/tmp.qCIJzpj0FL + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.OXg7dNvWP6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1O456hqWRC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OXg7dNvWP6 ++ cat /tmp/tmp.1O456hqWRC ++ rm /tmp/tmp.OXg7dNvWP6 /tmp/tmp.1O456hqWRC ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.TvEkoXGSJF +++ mktemp ++ local LAST_ERR=/tmp/tmp.yHMpDrNC1g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TvEkoXGSJF ++ cat /tmp/tmp.yHMpDrNC1g ++ rm /tmp/tmp.TvEkoXGSJF /tmp/tmp.yHMpDrNC1g ++ return 0 + '[' 1 == 1 ']' + check_telemetry_transfer http://version-service-cr:11000 8.0-recommended disabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=8.0-recommended + local telemetry_state=disabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Km5DCoZO8c ++ mktemp + local LAST_ERR=/tmp/tmp.ly5vyCIpQo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + 
set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Km5DCoZO8c deployment.apps/psmdb-client created + cat /tmp/tmp.ly5vyCIpQo + rm /tmp/tmp.Km5DCoZO8c /tmp/tmp.ly5vyCIpQo + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.C0Mt4u8Eb6 ++ mktemp + local LAST_ERR=/tmp/tmp.iS1Ip4iB8u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.C0Mt4u8Eb6 secret/minimal-cluster configured + cat /tmp/tmp.iS1Ip4iB8u Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.C0Mt4u8Eb6 /tmp/tmp.iS1Ip4iB8u + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "8.0-recommended" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cr-minimal.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.i1ONiIlQBO ++ mktemp + local LAST_ERR=/tmp/tmp.DsDjKzw1EX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i1ONiIlQBO perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.DsDjKzw1EX + rm /tmp/tmp.i1ONiIlQBO /tmp/tmp.DsDjKzw1EX + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vzye85nfuZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.2FN4ZPZlxU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vzye85nfuZ ++ cat /tmp/tmp.2FN4ZPZlxU ++ rm /tmp/tmp.vzye85nfuZ /tmp/tmp.2FN4ZPZlxU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o 
xtrace waiting for pod/minimal-cluster-rs0-0 to be ready.............OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Zn1xRv8Q9L +++ mktemp ++ local LAST_ERR=/tmp/tmp.ON9BmlhY0U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Zn1xRv8Q9L ++ cat /tmp/tmp.ON9BmlhY0U ++ rm /tmp/tmp.Zn1xRv8Q9L /tmp/tmp.ON9BmlhY0U ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZrrCGjTk2n +++ mktemp ++ local LAST_ERR=/tmp/tmp.6bhNqtENVP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZrrCGjTk2n ++ cat /tmp/tmp.6bhNqtENVP ++ rm /tmp/tmp.ZrrCGjTk2n /tmp/tmp.6bhNqtENVP ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................... + sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gB6dQIvri2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SyATC5C8et ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gB6dQIvri2 ++ cat /tmp/tmp.SyATC5C8et ++ rm /tmp/tmp.gB6dQIvri2 /tmp/tmp.SyATC5C8et ++ return 0 + local client_container=psmdb-client-696897d69b-7m25s + local mongo_flag= + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-7m25s -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Vhcd3fhkwY ++ mktemp + local LAST_ERR=/tmp/tmp.MpMm6VaXX8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7m25s -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + 
'[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Vhcd3fhkwY Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8189c253-17c8-499c-80f5-5307487ecf8f") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.MpMm6VaXX8 + rm /tmp/tmp.Vhcd3fhkwY /tmp/tmp.MpMm6VaXX8 + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7olnQmJ6ij +++ mktemp ++ local LAST_ERR=/tmp/tmp.L1511b0TSF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7olnQmJ6ij ++ cat /tmp/tmp.L1511b0TSF ++ rm /tmp/tmp.7olnQmJ6ij /tmp/tmp.L1511b0TSF ++ return 0 + local client_container=psmdb-client-696897d69b-7m25s + local mongo_flag= + [[ myApp:myPass@minimal-cluster-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-7m25s -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IKZHjUPX5E ++ mktemp + local LAST_ERR=/tmp/tmp.FCbqskMKa7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-7m25s -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IKZHjUPX5E Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d9e93510-c765-495d-a417-87b34cf93efe") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.FCbqskMKa7 + rm /tmp/tmp.IKZHjUPX5E /tmp/tmp.FCbqskMKa7 + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods 
--selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-6fcc64c58f-8pzlz -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.tVMhJUmPvQ ++ mktemp + local LAST_ERR=/tmp/tmp.c2KxSuvUXh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-6fcc64c58f-8pzlz -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tVMhJUmPvQ + cat /tmp/tmp.c2KxSuvUXh + rm /tmp/tmp.tVMhJUmPvQ /tmp/tmp.c2KxSuvUXh + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-65bc6b945c-zqjwz -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.IhKn0EzvMc ++ mktemp + local LAST_ERR=/tmp/tmp.cMNX5QZFpL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-65bc6b945c-zqjwz -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IhKn0EzvMc + cat /tmp/tmp.cMNX5QZFpL + rm /tmp/tmp.IhKn0EzvMc /tmp/tmp.cMNX5QZFpL + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' 8.0-recommended == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=8.0 + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' 8.0-recommended == 8.0-recommended -a disabled == disabled ']' + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json /dev/fd/63 ++ grep -f /tmp/tmp.4XGlLxXGvd/disabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json + [[ -s /tmp/tmp.4XGlLxXGvd/disabled_telemetry.version-service.log.json ]] + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' 8.0-recommended == disabled -a disabled == disabled ']' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.rgJX7iAdvz +++ mktemp ++ local LAST_ERR=/tmp/tmp.GelAl0ItxA ++ local exit_status=0 ++ local timeout=4 
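# The pipeline traced above is the core of check_telemetry_transfer: pull the version-service
# pod's logs, keep only the gRPC request entries, extract the JSON payload, and strip the
# fields that legitimately vary between runs, so the result can be compared with a stored
# golden file. A minimal sketch of that normalize-then-compare idea (the output path is
# illustrative, not the suite's exact code):
pod=$(kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator)
kubectl logs "$pod" -n psmdb-operator |
    grep -E 'server request payload|unary call' |
    grep -Eo '\{.*\}' |
    jq 'del(."grpc.request.content".msg.customResourceUid)
        | del(."grpc.request.content".msg.kubeVersion)
        | del(."grpc.start_time")
        | del(."grpc.time_ms")' >/tmp/telemetry.version-service.log.json
# Deleting the volatile fields (resource UID, cluster version, request timings) is what makes
# a byte-for-byte comparison against the expected telemetry payload meaningful.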
+++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rgJX7iAdvz ++ cat /tmp/tmp.GelAl0ItxA ++ rm /tmp/tmp.rgJX7iAdvz /tmp/tmp.GelAl0ItxA ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-qvwzq ++ mktemp + local LAST_OUT=/tmp/tmp.1ByYHSyfgg ++ mktemp + local LAST_ERR=/tmp/tmp.FlOtOmmSmH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-qvwzq + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1ByYHSyfgg pod "percona-server-mongodb-operator-894779dd9-qvwzq" deleted from psmdb-operator namespace + cat /tmp/tmp.FlOtOmmSmH + rm /tmp/tmp.1ByYHSyfgg /tmp/tmp.FlOtOmmSmH + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.IN6qi8aaAs ++ mktemp + local LAST_ERR=/tmp/tmp.BhZEOwfJwW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IN6qi8aaAs perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.BhZEOwfJwW + rm /tmp/tmp.IN6qi8aaAs /tmp/tmp.BhZEOwfJwW + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.diX16juj6M ++ mktemp + local LAST_ERR=/tmp/tmp.TLMTGTlruC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.diX16juj6M perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-7342 namespace + cat /tmp/tmp.TLMTGTlruC + rm /tmp/tmp.diX16juj6M /tmp/tmp.TLMTGTlruC + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.YK3kJMuSkl ++ mktemp + local LAST_ERR=/tmp/tmp.pFIIYaNtWB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YK3kJMuSkl deployment.apps "psmdb-client" deleted from version-service-7342 namespace + cat /tmp/tmp.pFIIYaNtWB + rm /tmp/tmp.YK3kJMuSkl /tmp/tmp.pFIIYaNtWB + return 0 + sleep 30 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.SktI76F7Q4 + local LAST_OUT=/tmp/tmp.gqGjefSq0K ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.EV2JyEeQQF + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.U6WHJVlIKQ + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gqGjefSq0K + cat /tmp/tmp.EV2JyEeQQF + rm /tmp/tmp.gqGjefSq0K /tmp/tmp.EV2JyEeQQF + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.SktI76F7Q4 deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.U6WHJVlIKQ + rm /tmp/tmp.SktI76F7Q4 /tmp/tmp.U6WHJVlIKQ + return 0 + wait_deployment percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.PAIgndFsJx ++ mktemp + local LAST_ERR=/tmp/tmp.YIWICdhqtp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PAIgndFsJx + cat /tmp/tmp.YIWICdhqtp + rm /tmp/tmp.PAIgndFsJx /tmp/tmp.YIWICdhqtp + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.cg4zOTwRjG +++ mktemp ++ local LAST_ERR=/tmp/tmp.TBC4afCaBw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cg4zOTwRjG ++ cat /tmp/tmp.TBC4afCaBw ++ rm /tmp/tmp.cg4zOTwRjG /tmp/tmp.TBC4afCaBw ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.0yg4AGbCMD +++ mktemp ++ local LAST_ERR=/tmp/tmp.ghdwKMIYVg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0yg4AGbCMD ++ cat /tmp/tmp.ghdwKMIYVg ++ rm /tmp/tmp.0yg4AGbCMD /tmp/tmp.ghdwKMIYVg ++ return 0 + '[' 1 == 1 ']' + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.i68VSWNDqi ++ mktemp + local LAST_ERR=/tmp/tmp.hNio7TwZDW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i68VSWNDqi pod "version-service-cr-6fcc64c58f-8pzlz" deleted from psmdb-operator namespace + cat /tmp/tmp.hNio7TwZDW + rm /tmp/tmp.i68VSWNDqi /tmp/tmp.hNio7TwZDW + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.uNlqWznKhg ++ mktemp + local LAST_ERR=/tmp/tmp.3SU6at3sYO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uNlqWznKhg pod "version-service-65bc6b945c-zqjwz" deleted from psmdb-operator namespace + cat /tmp/tmp.3SU6at3sYO + rm /tmp/tmp.uNlqWznKhg /tmp/tmp.3SU6at3sYO + return 0 + check_telemetry_transfer http://version-service-cr:11000 disabled disabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=disabled + local telemetry_state=disabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client 
----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.vX9ER52AaP ++ mktemp + local LAST_ERR=/tmp/tmp.CJqi9aCK7R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vX9ER52AaP deployment.apps/psmdb-client created + cat /tmp/tmp.CJqi9aCK7R + rm /tmp/tmp.vX9ER52AaP /tmp/tmp.CJqi9aCK7R + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.EpD8djDNVB ++ mktemp + local LAST_ERR=/tmp/tmp.IuHBWrp5Em + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EpD8djDNVB secret/minimal-cluster configured + cat /tmp/tmp.IuHBWrp5Em Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.EpD8djDNVB /tmp/tmp.IuHBWrp5Em + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "disabled" | .spec.initImage = "perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669" | .spec.crVersion = "9.9.9" | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cr-minimal.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.6FtrGesrGL ++ mktemp + local LAST_ERR=/tmp/tmp.Rp1y9JvG4o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6FtrGesrGL perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.Rp1y9JvG4o + rm /tmp/tmp.6FtrGesrGL /tmp/tmp.Rp1y9JvG4o + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kVjwOKKyk1 +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.wQGMOTETnG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kVjwOKKyk1 ++ cat /tmp/tmp.wQGMOTETnG ++ rm /tmp/tmp.kVjwOKKyk1 /tmp/tmp.wQGMOTETnG ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready............OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0seo0jqMOY +++ mktemp ++ local LAST_ERR=/tmp/tmp.equKNYKHi0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0seo0jqMOY ++ cat /tmp/tmp.equKNYKHi0 ++ rm /tmp/tmp.0seo0jqMOY /tmp/tmp.equKNYKHi0 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5NTS92me4h +++ mktemp ++ local LAST_ERR=/tmp/tmp.sp79xnS457 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5NTS92me4h ++ cat /tmp/tmp.sp79xnS457 ++ rm /tmp/tmp.5NTS92me4h /tmp/tmp.sp79xnS457 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................... 
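# The dots above come from wait_for_running/wait_pod, which poll until every rs0 member is
# Ready and the psmdb resource reports a ready state. A stripped-down sketch of that polling
# loop (the timeout budget and messages here are illustrative, not the suite's exact helper):
wait_pod_ready() {
    local pod=$1 retry=0
    # poll the pod's Ready condition until it reports True, printing a progress dot per attempt
    until [ "$(kubectl get pod "$pod" -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}')" = "True" ]; do
        sleep 1
        retry=$((retry + 1))
        if [ "$retry" -ge 360 ]; then
            echo "timeout waiting for pod/$pod" >&2
            return 1
        fi
        echo -n .
    done
    echo OK
}
# The same pattern, pointed at the psmdb custom resource's .status.state instead of a pod
# condition, is what drives the "Waiting for cluster readyness" phase of the trace.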
+ sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tD7EKpafCS +++ mktemp ++ local LAST_ERR=/tmp/tmp.GBwe9K5yuE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tD7EKpafCS ++ cat /tmp/tmp.GBwe9K5yuE ++ rm /tmp/tmp.tD7EKpafCS /tmp/tmp.GBwe9K5yuE ++ return 0 + local client_container=psmdb-client-696897d69b-5r2h7 + local mongo_flag= + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-5r2h7 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.E7tikZY30x ++ mktemp + local LAST_ERR=/tmp/tmp.f8A7z9IdXF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-5r2h7 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E7tikZY30x Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("0827755d-a96a-442d-95a9-8f354d079811") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.f8A7z9IdXF + rm /tmp/tmp.E7tikZY30x /tmp/tmp.f8A7z9IdXF + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xTGglhQmxn 
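# run_mongo, expanded in the trace above and below, resolves the psmdb-client pod and execs
# the mongo shell inside it, feeding the command over stdin and building an SRV connection
# string from the user@host pair. A condensed sketch of that flow (quoting simplified and
# error handling omitted; defaults mirror the trace):
run_mongo() {
    local command=$1 uri=$2 driver=${3:-mongodb+srv} suffix=${4:-.svc.cluster.local}
    local client
    # find the client pod deployed from e2e-tests/conf/client.yml
    client=$(kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client" -- bash -c \
        "printf '$command\n' | mongo $driver://$uri$suffix/admin?ssl=false\&replicaSet=rs0"
}
# e.g. run_mongo 'db.test.find()' myApp:myPass@minimal-cluster-rs0.version-service-7342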
+++ mktemp ++ local LAST_ERR=/tmp/tmp.cPZpg0jlhK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xTGglhQmxn ++ cat /tmp/tmp.cPZpg0jlhK ++ rm /tmp/tmp.xTGglhQmxn /tmp/tmp.cPZpg0jlhK ++ return 0 + local client_container=psmdb-client-696897d69b-5r2h7 + local mongo_flag= + [[ myApp:myPass@minimal-cluster-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-5r2h7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.T9HVKsZggq ++ mktemp + local LAST_ERR=/tmp/tmp.Zx7thAUJPL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-5r2h7 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.T9HVKsZggq Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("7d4d4ab7-ceab-444a-b46b-9bee0b9e4f79") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Zx7thAUJPL + rm /tmp/tmp.T9HVKsZggq /tmp/tmp.Zx7thAUJPL + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-6fcc64c58f-q5xpn -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.6udHlhmeNe ++ mktemp + local LAST_ERR=/tmp/tmp.ShMFllmYX6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-6fcc64c58f-q5xpn -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6udHlhmeNe + cat /tmp/tmp.ShMFllmYX6 + rm /tmp/tmp.6udHlhmeNe /tmp/tmp.ShMFllmYX6 + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-65bc6b945c-6mjtp -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.e5x0VaiUvm ++ mktemp + local LAST_ERR=/tmp/tmp.UuxQ6AyupY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs 
version-service-65bc6b945c-6mjtp -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e5x0VaiUvm + cat /tmp/tmp.UuxQ6AyupY + rm /tmp/tmp.e5x0VaiUvm /tmp/tmp.UuxQ6AyupY + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=disabled + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' disabled == disabled-recommended -a disabled == disabled ']' + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == disabled ']' + [[ -s /tmp/tmp.4XGlLxXGvd/disabled_telemetry.version-service-cr.log.json ]] + [[ -s /tmp/tmp.4XGlLxXGvd/disabled_telemetry.version-service.log.json ]] ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.0oWOotfBIw +++ mktemp ++ local LAST_ERR=/tmp/tmp.TIEQhVq7iI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0oWOotfBIw ++ cat /tmp/tmp.TIEQhVq7iI ++ rm /tmp/tmp.0oWOotfBIw /tmp/tmp.TIEQhVq7iI ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-cfsqd ++ mktemp + local LAST_OUT=/tmp/tmp.8XiRyhaXjo ++ mktemp + local LAST_ERR=/tmp/tmp.FvGORvC3bL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-cfsqd + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8XiRyhaXjo pod "percona-server-mongodb-operator-894779dd9-cfsqd" deleted from psmdb-operator namespace + cat /tmp/tmp.FvGORvC3bL + rm /tmp/tmp.8XiRyhaXjo /tmp/tmp.FvGORvC3bL + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.QGbJPe3s0i ++ mktemp + local LAST_ERR=/tmp/tmp.dT9gzcipuM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QGbJPe3s0i perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.dT9gzcipuM + rm /tmp/tmp.QGbJPe3s0i /tmp/tmp.dT9gzcipuM + return 0 + kubectl_bin delete psmdb --all ++ mktemp + 
local LAST_OUT=/tmp/tmp.YRIA6pHdmj ++ mktemp + local LAST_ERR=/tmp/tmp.PrA8C0MU3M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YRIA6pHdmj perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-7342 namespace + cat /tmp/tmp.PrA8C0MU3M + rm /tmp/tmp.YRIA6pHdmj /tmp/tmp.PrA8C0MU3M + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.xZxfTGKzM3 ++ mktemp + local LAST_ERR=/tmp/tmp.aqFkjh4jyw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xZxfTGKzM3 deployment.apps "psmdb-client" deleted from version-service-7342 namespace + cat /tmp/tmp.aqFkjh4jyw + rm /tmp/tmp.xZxfTGKzM3 /tmp/tmp.aqFkjh4jyw + return 0 + sleep 30 + cases=("version-service-exact" "version-service-recommended" "version-service-latest" "version-service-major" "version-service-unreachable") + expected_images=("percona/percona-server-mongodb:6.0.3-2" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:6.0.4-3" "$IMAGE_MONGOD") + for i in "${!cases[@]}" + desc 'test version-service-exact' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-exact ----------------------------------------------------------------------------------- + cluster=version-service-exact + expected_image=percona/percona-server-mongodb:6.0.3-2 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.pwjDbIs7GQ ++ mktemp + local LAST_ERR=/tmp/tmp.U3Fxc0jXpq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pwjDbIs7GQ secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.U3Fxc0jXpq + rm /tmp/tmp.pwjDbIs7GQ /tmp/tmp.U3Fxc0jXpq + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.PysfdpEls8 + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/version-service-exact-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' /tmp/tmp.PysfdpEls8 + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.fTj82d3Fkt ++ mktemp + local 
LAST_ERR=/tmp/tmp.iQdeuOrxMe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fTj82d3Fkt perconaservermongodb.psmdb.percona.com/version-service-exact created + cat /tmp/tmp.iQdeuOrxMe + rm /tmp/tmp.fTj82d3Fkt /tmp/tmp.iQdeuOrxMe + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-exact-rs0 3 + local name=version-service-exact-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-exact ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-exact-rs0-0 + local pod=version-service-exact-rs0-0 + set +o xtrace waiting for pod/version-service-exact-rs0-0 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-exact-rs0-1 + local pod=version-service-exact-rs0-1 + set +o xtrace waiting for pod/version-service-exact-rs0-1 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.F2CEitJYfX +++ mktemp ++ local LAST_ERR=/tmp/tmp.SMPehiTn24 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.F2CEitJYfX ++ cat /tmp/tmp.SMPehiTn24 ++ rm /tmp/tmp.F2CEitJYfX /tmp/tmp.SMPehiTn24 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-exact-rs0-2 + local pod=version-service-exact-rs0-2 + set +o xtrace waiting for pod/version-service-exact-rs0-2 to be ready.........OK ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rMPc21I1pX +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ytpzp7ju3e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rMPc21I1pX ++ cat /tmp/tmp.Ytpzp7ju3e ++ rm /tmp/tmp.rMPc21I1pX /tmp/tmp.Ytpzp7ju3e ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UpPf22qFZN +++ mktemp ++ local LAST_ERR=/tmp/tmp.SWuogXq1C8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UpPf22qFZN ++ cat /tmp/tmp.SWuogXq1C8 ++ rm /tmp/tmp.UpPf22qFZN /tmp/tmp.SWuogXq1C8 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness. 
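# compare_kubectl, which runs next, validates the generated statefulset against a golden file:
# dump the live object as YAML, delete every volatile or environment-specific field with yq,
# rewrite the namespace to a placeholder, then diff against the stored expectation. A minimal
# sketch of that normalize-then-diff pattern (the real filter, traced below, strips many more fields):
compare_sts() {
    local name=$1 expected=$2 actual
    actual=$(mktemp)
    kubectl get statefulset "$name" -o yaml |
        yq eval 'del(.metadata.uid)
            | del(.metadata.resourceVersion)
            | del(.metadata.creationTimestamp)
            | del(.status)
            | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE")' - >"$actual"
    diff -u "$expected" "$actual"
}
# Substituting the randomized test namespace with NAME_SPACE keeps the golden files
# namespace-agnostic, so one expectation serves every run.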
+ sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.2nZi6RXOFa ++ mktemp + local LAST_ERR=/tmp/tmp.VMDAf7xisc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2nZi6RXOFa + cat /tmp/tmp.VMDAf7xisc + rm /tmp/tmp.2nZi6RXOFa /tmp/tmp.VMDAf7xisc + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + log 'compare_kubectl: statefulset/version-service-exact-rs0 OK' + set +o xtrace [2025-12-04T22:11:16+0000] compare_kubectl: statefulset/version-service-exact-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.atlSmle8Ub +++ mktemp ++ local LAST_ERR=/tmp/tmp.JiU3MjRHTl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.atlSmle8Ub ++ cat /tmp/tmp.JiU3MjRHTl ++ rm /tmp/tmp.atlSmle8Ub /tmp/tmp.JiU3MjRHTl ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fPTzM4PHWU ++ mktemp + local LAST_ERR=/tmp/tmp.QCZ1QhQ7cd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo 
mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fPTzM4PHWU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-1.version-service-exact-rs0.version-service-7342.svc.cluster.local:27017,version-service-exact-rs0-0.version-service-exact-rs0.version-service-7342.svc.cluster.local:27017,version-service-exact-rs0-2.version-service-exact-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f574eba1-0961-4c62-9d6f-ccdb4c1406a5") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.QCZ1QhQ7cd + rm /tmp/tmp.fPTzM4PHWU /tmp/tmp.QCZ1QhQ7cd + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-exact-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-exact-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2H3Ez1VSHL +++ mktemp ++ local LAST_ERR=/tmp/tmp.OT7PxEGULx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2H3Ez1VSHL ++ cat /tmp/tmp.OT7PxEGULx ++ rm /tmp/tmp.2H3Ez1VSHL /tmp/tmp.OT7PxEGULx ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ myApp:myPass@version-service-exact-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.bOTGbBg010 ++ mktemp + local LAST_ERR=/tmp/tmp.mzqGKTBzya + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bOTGbBg010 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-2.version-service-exact-rs0.version-service-7342.svc.cluster.local:27017,version-service-exact-rs0-1.version-service-exact-rs0.version-service-7342.svc.cluster.local:27017,version-service-exact-rs0-0.version-service-exact-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: 
session { "id" : UUID("c77d8034-44e6-4715-b6f5-ed8daf7258e1") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.mzqGKTBzya + rm /tmp/tmp.bOTGbBg010 /tmp/tmp.mzqGKTBzya + return 0 + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.1bEnTzSUEq ++ mktemp + local LAST_ERR=/tmp/tmp.pgPiTTCO3M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1bEnTzSUEq + cat /tmp/tmp.pgPiTTCO3M + rm /tmp/tmp.1bEnTzSUEq /tmp/tmp.pgPiTTCO3M + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-exact-rs0.yml + log 'compare_kubectl: statefulset/version-service-exact-rs0 OK' + set +o xtrace [2025-12-04T22:11:23+0000] compare_kubectl: statefulset/version-service-exact-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.QsVo2YvRBs ++ mktemp + local LAST_ERR=/tmp/tmp.DXHgkSr12u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QsVo2YvRBs perconaservermongodb.psmdb.percona.com "version-service-exact" deleted from version-service-7342 namespace + cat /tmp/tmp.DXHgkSr12u + rm /tmp/tmp.QsVo2YvRBs /tmp/tmp.DXHgkSr12u + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.CNk0rzBp3c +++ mktemp ++ local LAST_ERR=/tmp/tmp.3j7wueX8Th ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CNk0rzBp3c ++ cat /tmp/tmp.3j7wueX8Th ++ rm /tmp/tmp.CNk0rzBp3c /tmp/tmp.3j7wueX8Th ++ return 0 + kubectl delete pod -n psmdb-operator 
percona-server-mongodb-operator-894779dd9-lnjcn pod "percona-server-mongodb-operator-894779dd9-lnjcn" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-recommended' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-recommended ----------------------------------------------------------------------------------- + cluster=version-service-recommended + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.vavsIEDe8r ++ mktemp + local LAST_ERR=/tmp/tmp.vlRIxWtS0J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vavsIEDe8r secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.vlRIxWtS0J Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.vavsIEDe8r /tmp/tmp.vlRIxWtS0J + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.cX2RKbcprK + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/version-service-recommended-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' /tmp/tmp.cX2RKbcprK + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.AoHzsc10sP ++ mktemp + local LAST_ERR=/tmp/tmp.UWj5UVBaaR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AoHzsc10sP perconaservermongodb.psmdb.percona.com/version-service-recommended created + cat /tmp/tmp.UWj5UVBaaR + rm /tmp/tmp.AoHzsc10sP /tmp/tmp.UWj5UVBaaR + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-recommended-rs0 3 + local name=version-service-recommended-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local 
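
Each test case builds its PSMDB custom resource the same way as traced here: sed stamps the PR's init image into the per-case template, yq disables scheduled backups and pins the mongod, PMM, and backup images, and the result is piped into kubectl apply. Roughly, with the paths and tags from this run (the redirect into the temp file is inferred, since xtrace does not show redirections):

# materialize the CR for one case (sketch of the traced steps)
src=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/version-service-recommended-rs0.yml
tmp_file=$(mktemp)
sed 's%#initImage%perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669%g' "$src" > "$tmp_file"
yq eval '
    .spec.backup.enabled = false |
    del(.spec.backup.tasks) |
    .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" |
    .spec.pmm.image = "percona/pmm-client:2.44.1-1" |
    .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' "$tmp_file" \
  | kubectl apply -f -
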
rs_name=rs0 + local cluster_name=version-service-recommended ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-recommended-rs0-0 + local pod=version-service-recommended-rs0-0 + set +o xtrace waiting for pod/version-service-recommended-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-recommended-rs0-1 + local pod=version-service-recommended-rs0-1 + set +o xtrace waiting for pod/version-service-recommended-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3kcQicMavj +++ mktemp ++ local LAST_ERR=/tmp/tmp.VNSogmOixi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3kcQicMavj ++ cat /tmp/tmp.VNSogmOixi ++ rm /tmp/tmp.3kcQicMavj /tmp/tmp.VNSogmOixi ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-recommended-rs0-2 + local pod=version-service-recommended-rs0-2 + set +o xtrace waiting for pod/version-service-recommended-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Kqdq5ClGDB +++ mktemp ++ local LAST_ERR=/tmp/tmp.qEOziweK0F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Kqdq5ClGDB ++ cat /tmp/tmp.qEOziweK0F ++ rm /tmp/tmp.Kqdq5ClGDB /tmp/tmp.qEOziweK0F ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RnPlWvoqCy +++ mktemp ++ local LAST_ERR=/tmp/tmp.jbp8zL9Nnh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RnPlWvoqCy ++ cat /tmp/tmp.jbp8zL9Nnh ++ rm /tmp/tmp.RnPlWvoqCy /tmp/tmp.jbp8zL9Nnh ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f 
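
wait_for_running, whose expansion ends above, waits for pods rs0-0 and rs0-1, then checks whether arbiter, nonvoting, and hidden members are enabled in the CR (all empty here, so none are expected) before waiting for rs0-2 and overall cluster readiness. The dots after each 'waiting for pod/... to be ready' line come from a poll loop along these lines (function name, poll interval, and retry cap are assumptions):

wait_pod_sketch() {
    local pod="$1" retries=0
    # poll the Ready condition until it flips to True
    until [[ $(kubectl get pod "$pod" \
        -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}') == "True" ]]; do
        printf '.'
        sleep 1
        (( ++retries > 360 )) && { echo "pod/$pod never became ready"; return 1; }
    done
    echo OK
}
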
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.EA922DrD8v ++ mktemp + local LAST_ERR=/tmp/tmp.ucEvzakjln + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EA922DrD8v + cat /tmp/tmp.ucEvzakjln + rm /tmp/tmp.EA922DrD8v /tmp/tmp.ucEvzakjln + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + log 'compare_kubectl: statefulset/version-service-recommended-rs0 OK' + set +o xtrace [2025-12-04T22:13:40+0000] compare_kubectl: statefulset/version-service-recommended-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wQjtM2qnMC +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Qka2fXQzm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wQjtM2qnMC ++ cat /tmp/tmp.2Qka2fXQzm ++ rm /tmp/tmp.wQjtM2qnMC /tmp/tmp.2Qka2fXQzm ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RkZA5lSrF7 ++ mktemp + local LAST_ERR=/tmp/tmp.RHmjcML9R0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo 
mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RkZA5lSrF7 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-7342.svc.cluster.local:27017,version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-7342.svc.cluster.local:27017,version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2432fcb6-87ce-4c47-a57b-6dc864fcbe81") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.RHmjcML9R0 + rm /tmp/tmp.RkZA5lSrF7 /tmp/tmp.RHmjcML9R0 + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-recommended-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-recommended-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gvDlCKbQzZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.CeI6GZ4UpA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gvDlCKbQzZ ++ cat /tmp/tmp.CeI6GZ4UpA ++ rm /tmp/tmp.gvDlCKbQzZ /tmp/tmp.CeI6GZ4UpA ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ myApp:myPass@version-service-recommended-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.dHTyKbKYeH ++ mktemp + local LAST_ERR=/tmp/tmp.6wK5mi2OAn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dHTyKbKYeH Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-7342.svc.cluster.local:27017,version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-7342.svc.cluster.local:27017,version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8d3ab2c4-8f85-458e-afa1-1bed1d09b247") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.6wK5mi2OAn + rm /tmp/tmp.dHTyKbKYeH /tmp/tmp.6wK5mi2OAn + return 0 + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Kn3CxuwKGn ++ mktemp + local LAST_ERR=/tmp/tmp.RtlhN43pG4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Kn3CxuwKGn + cat /tmp/tmp.RtlhN43pG4 + rm /tmp/tmp.Kn3CxuwKGn /tmp/tmp.RtlhN43pG4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-recommended-rs0.yml + log 'compare_kubectl: statefulset/version-service-recommended-rs0 OK' + set +o xtrace [2025-12-04T22:13:46+0000] compare_kubectl: statefulset/version-service-recommended-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.53W1f6nDTx ++ mktemp + local LAST_ERR=/tmp/tmp.gQAQHyTFDm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.53W1f6nDTx perconaservermongodb.psmdb.percona.com "version-service-recommended" deleted from version-service-7342 namespace + cat /tmp/tmp.gQAQHyTFDm + rm /tmp/tmp.53W1f6nDTx /tmp/tmp.gQAQHyTFDm + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.v8uuCq7ABV +++ mktemp ++ local LAST_ERR=/tmp/tmp.x69vXLzsIM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.v8uuCq7ABV ++ cat /tmp/tmp.x69vXLzsIM ++ rm /tmp/tmp.v8uuCq7ABV /tmp/tmp.x69vXLzsIM ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-gjjcs pod "percona-server-mongodb-operator-894779dd9-gjjcs" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-latest' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-latest ----------------------------------------------------------------------------------- + cluster=version-service-latest + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.KNkR0zBVzG ++ mktemp + local LAST_ERR=/tmp/tmp.8iYD4kXNGw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KNkR0zBVzG secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.8iYD4kXNGw Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.KNkR0zBVzG /tmp/tmp.8iYD4kXNGw + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.YF7mgpmx6h + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/version-service-latest-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' /tmp/tmp.YF7mgpmx6h + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.l7cODYmI3h ++ mktemp + local LAST_ERR=/tmp/tmp.gBNm5NNrLe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l7cODYmI3h perconaservermongodb.psmdb.percona.com/version-service-latest created + cat /tmp/tmp.gBNm5NNrLe + rm /tmp/tmp.l7cODYmI3h /tmp/tmp.gBNm5NNrLe + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-latest-rs0 3 + local name=version-service-latest-rs0 + 
let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-latest ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-latest-rs0-0 + local pod=version-service-latest-rs0-0 + set +o xtrace waiting for pod/version-service-latest-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-latest-rs0-1 + local pod=version-service-latest-rs0-1 + set +o xtrace waiting for pod/version-service-latest-rs0-1 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7HZgCw9ygm +++ mktemp ++ local LAST_ERR=/tmp/tmp.jfxqJ9ln8T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7HZgCw9ygm ++ cat /tmp/tmp.jfxqJ9ln8T ++ rm /tmp/tmp.7HZgCw9ygm /tmp/tmp.jfxqJ9ln8T ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-latest-rs0-2 + local pod=version-service-latest-rs0-2 + set +o xtrace waiting for pod/version-service-latest-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f9ytJvIN5C +++ mktemp ++ local LAST_ERR=/tmp/tmp.nWiQsbAATR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f9ytJvIN5C ++ cat /tmp/tmp.nWiQsbAATR ++ rm /tmp/tmp.f9ytJvIN5C /tmp/tmp.nWiQsbAATR ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.am6FrFMLx1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KxfGBRunRS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.am6FrFMLx1 ++ cat /tmp/tmp.KxfGBRunRS ++ rm /tmp/tmp.am6FrFMLx1 /tmp/tmp.KxfGBRunRS ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml 
']' + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.aNyU6Vfgd0 ++ mktemp + local LAST_ERR=/tmp/tmp.reUgM4qGqe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aNyU6Vfgd0 + cat /tmp/tmp.reUgM4qGqe + rm /tmp/tmp.aNyU6Vfgd0 /tmp/tmp.reUgM4qGqe + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + log 'compare_kubectl: statefulset/version-service-latest-rs0 OK' + set +o xtrace [2025-12-04T22:15:59+0000] compare_kubectl: statefulset/version-service-latest-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XWBA2sSkLo +++ mktemp ++ local LAST_ERR=/tmp/tmp.HdejIH88tA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XWBA2sSkLo ++ cat /tmp/tmp.HdejIH88tA ++ rm /tmp/tmp.XWBA2sSkLo /tmp/tmp.HdejIH88tA ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.N5EJKHHsME ++ mktemp + local LAST_ERR=/tmp/tmp.YpjFvljnVZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo 
mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.N5EJKHHsME Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-1.version-service-latest-rs0.version-service-7342.svc.cluster.local:27017,version-service-latest-rs0-0.version-service-latest-rs0.version-service-7342.svc.cluster.local:27017,version-service-latest-rs0-2.version-service-latest-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("224fad5c-181d-409b-8f34-9061f0477cba") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.YpjFvljnVZ + rm /tmp/tmp.N5EJKHHsME /tmp/tmp.YpjFvljnVZ + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-latest-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-latest-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bi3cmdSAk4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yC4pcoQVBY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bi3cmdSAk4 ++ cat /tmp/tmp.yC4pcoQVBY ++ rm /tmp/tmp.bi3cmdSAk4 /tmp/tmp.yC4pcoQVBY ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ myApp:myPass@version-service-latest-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.1co1J2x8gI ++ mktemp + local LAST_ERR=/tmp/tmp.Bjnhc5Rf3g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1co1J2x8gI Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-1.version-service-latest-rs0.version-service-7342.svc.cluster.local:27017,version-service-latest-rs0-0.version-service-latest-rs0.version-service-7342.svc.cluster.local:27017,version-service-latest-rs0-2.version-service-latest-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false 
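
run_mongo, traced in full just above, is the suite's query runner: it resolves the psmdb-client pod by label, assembles a mongodb+srv connection string from the user:pass@service.namespace argument plus the .svc.cluster.local suffix, and pipes the command into the mongo shell via printf inside that pod. A minimal sketch of the pattern (the name run_mongo_sketch is illustrative; the real helper also handles cfg replica sets and extra mongo flags, as the *cfg* test in the trace shows):

run_mongo_sketch() {
    local command="$1"   # e.g. 'use myApp\n db.test.insert({ x: 100500 })'
    local uri="$2"       # user:pass@service.namespace
    local suffix='.svc.cluster.local'
    local client_container
    client_container=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    # \& keeps the unquoted URI inside the remote shell from forking at '&'
    kubectl exec "$client_container" -- bash -c \
        "printf '${command}\n' | mongo mongodb+srv://${uri}${suffix}/admin?ssl=false\&replicaSet=rs0"
}
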
Implicit session: session { "id" : UUID("ba7e0c8b-46c3-4a1e-935a-5ab775aa6401") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Bjnhc5Rf3g + rm /tmp/tmp.1co1J2x8gI /tmp/tmp.Bjnhc5Rf3g + return 0 + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.BCDeNXIykn ++ mktemp + local LAST_ERR=/tmp/tmp.95MRB8PPVO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BCDeNXIykn + cat /tmp/tmp.95MRB8PPVO + rm /tmp/tmp.BCDeNXIykn /tmp/tmp.95MRB8PPVO + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-latest-rs0.yml + log 'compare_kubectl: statefulset/version-service-latest-rs0 OK' + set +o xtrace [2025-12-04T22:16:05+0000] compare_kubectl: statefulset/version-service-latest-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.zPASyhnSG0 ++ mktemp + local LAST_ERR=/tmp/tmp.1fVNJyGH9B + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zPASyhnSG0 perconaservermongodb.psmdb.percona.com "version-service-latest" deleted from version-service-7342 namespace + cat /tmp/tmp.1fVNJyGH9B + rm /tmp/tmp.zPASyhnSG0 /tmp/tmp.1fVNJyGH9B + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.g0SLimJZxf +++ mktemp ++ local LAST_ERR=/tmp/tmp.HrPMnPakMa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g0SLimJZxf ++ cat /tmp/tmp.HrPMnPakMa ++ rm /tmp/tmp.g0SLimJZxf 
/tmp/tmp.HrPMnPakMa ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-7w7x7 pod "percona-server-mongodb-operator-894779dd9-7w7x7" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-major' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-major ----------------------------------------------------------------------------------- + cluster=version-service-major + expected_image=percona/percona-server-mongodb:6.0.4-3 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4MAxG1eZCq ++ mktemp + local LAST_ERR=/tmp/tmp.NyxcWjdXhI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4MAxG1eZCq secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.NyxcWjdXhI Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.4MAxG1eZCq /tmp/tmp.NyxcWjdXhI + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.eZTrxT69Ba + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/version-service-major-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' /tmp/tmp.eZTrxT69Ba ++ mktemp + local LAST_OUT=/tmp/tmp.yWpNdUqQu6 ++ mktemp + local LAST_ERR=/tmp/tmp.g69uF63GBy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yWpNdUqQu6 perconaservermongodb.psmdb.percona.com/version-service-major created + cat /tmp/tmp.g69uF63GBy + rm /tmp/tmp.yWpNdUqQu6 /tmp/tmp.g69uF63GBy + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-major-rs0 3 + local name=version-service-major-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o 
xtrace + local rs_name=rs0 + local cluster_name=version-service-major ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-major-rs0-0 + local pod=version-service-major-rs0-0 + set +o xtrace waiting for pod/version-service-major-rs0-0 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-major-rs0-1 + local pod=version-service-major-rs0-1 + set +o xtrace waiting for pod/version-service-major-rs0-1 to be ready..................OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jP3bkJGzau +++ mktemp ++ local LAST_ERR=/tmp/tmp.ihkHd4B5RD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jP3bkJGzau ++ cat /tmp/tmp.ihkHd4B5RD ++ rm /tmp/tmp.jP3bkJGzau /tmp/tmp.ihkHd4B5RD ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-major-rs0-2 + local pod=version-service-major-rs0-2 + set +o xtrace waiting for pod/version-service-major-rs0-2 to be ready................OK ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lLXEU4xBtr +++ mktemp ++ local LAST_ERR=/tmp/tmp.mzE7R1GtJ2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lLXEU4xBtr ++ cat /tmp/tmp.mzE7R1GtJ2 ++ rm /tmp/tmp.lLXEU4xBtr /tmp/tmp.mzE7R1GtJ2 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Zpr1TjQfbz +++ mktemp ++ local LAST_ERR=/tmp/tmp.j5JpF3Ixu0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Zpr1TjQfbz ++ cat /tmp/tmp.j5JpF3Ixu0 ++ rm /tmp/tmp.Zpr1TjQfbz /tmp/tmp.j5JpF3Ixu0 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-major-rs0 + local resource=statefulset/version-service-major-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-major-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-major-rs0 + yq eval ' 
+ yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' -
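The filter above is how compare_kubectl makes a live object diffable against a golden file: every runtime-populated or cluster-specific field (UIDs, timestamps, images, IPs, provisioner annotations, status) is deleted, and the test namespace is rewritten to the NAME_SPACE placeholder. A reduced sketch of the same normalization, assuming yq v4 and keeping only a handful of the deletions (helper name and the abbreviated field list are illustrative):

    normalize_statefulset() {   # $1 = statefulset name, $2 = namespace
        kubectl get -o yaml "statefulset/$1" | yq eval '
            del(.metadata.managedFields, .metadata.resourceVersion, .metadata.uid, .status) |
            del(.. | select(has("creationTimestamp")).creationTimestamp) |
            del(.. | select(has("image")).image) |
            (.. | select(tag == "!!str")) |= sub("'"$2"'", "NAME_SPACE")' -
    }

    normalize_statefulset version-service-major-rs0 version-service-7342 \
        > /tmp/statefulset_version-service-major-rs0.yml
    diff -u compare/statefulset_version-service-major-rs0.yml \
        /tmp/statefulset_version-service-major-rs0.yml

Deleting images is deliberate: the expected image is asserted separately per pod, so the statefulset comparison stays valid across version bumps.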
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.5dIUsjytNb ++ mktemp + local LAST_ERR=/tmp/tmp.v3gY32HYwH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5dIUsjytNb + cat /tmp/tmp.v3gY32HYwH + rm /tmp/tmp.5dIUsjytNb /tmp/tmp.v3gY32HYwH + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + log 'compare_kubectl: statefulset/version-service-major-rs0 OK' + set +o xtrace [2025-12-04T22:18:58+0000] compare_kubectl: statefulset/version-service-major-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-major-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-major-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IZ1SvGWKpe +++ mktemp ++ local LAST_ERR=/tmp/tmp.SGKvj9Na5m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IZ1SvGWKpe ++ cat /tmp/tmp.SGKvj9Na5m ++ rm /tmp/tmp.IZ1SvGWKpe /tmp/tmp.SGKvj9Na5m ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-major-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.dh51egPYyZ ++ mktemp + local LAST_ERR=/tmp/tmp.fS8vQ6rfOz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo 
mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dh51egPYyZ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-1.version-service-major-rs0.version-service-7342.svc.cluster.local:27017,version-service-major-rs0-2.version-service-major-rs0.version-service-7342.svc.cluster.local:27017,version-service-major-rs0-0.version-service-major-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6c4fb0b6-c083-4449-940c-1c1c8d6ea03f") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.fS8vQ6rfOz + rm /tmp/tmp.dh51egPYyZ /tmp/tmp.fS8vQ6rfOz + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-major-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-major-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oCv7KQfflC +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fxcg6c7d48 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oCv7KQfflC ++ cat /tmp/tmp.Fxcg6c7d48 ++ rm /tmp/tmp.oCv7KQfflC /tmp/tmp.Fxcg6c7d48 ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ myApp:myPass@version-service-major-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EXT6p32rwi ++ mktemp + local LAST_ERR=/tmp/tmp.3sYFAUxGcR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EXT6p32rwi Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-2.version-service-major-rs0.version-service-7342.svc.cluster.local:27017,version-service-major-rs0-0.version-service-major-rs0.version-service-7342.svc.cluster.local:27017,version-service-major-rs0-1.version-service-major-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: 
session { "id" : UUID("e5a2011f-8428-47f5-a7c2-f9917ec16e71") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.3sYFAUxGcR + rm /tmp/tmp.EXT6p32rwi /tmp/tmp.3sYFAUxGcR + return 0
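Both the createUser call and the insert above go through run_mongo, which locates the psmdb-client deployment's pod, then execs a printf'ed command piped into the mongo shell over a mongodb+srv URI. Stripped of the *cfg* branch and extra flag handling, the helper amounts to roughly this (a sketch, not the verbatim function):

    run_mongo() {   # $1 = command, $2 = user:pass@cluster.namespace
        local command=$1 uri=$2 suffix=.svc.cluster.local client_container
        client_container=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$client_container" -- bash -c \
            "printf '$command\n' | mongo mongodb+srv://$uri$suffix/admin?ssl=false\&replicaSet=rs0"
    }

    run_mongo 'use myApp\n db.test.insert({ x: 100500 })' \
        myApp:myPass@version-service-major-rs0.version-service-7342

The SRV record resolves to all three replset members, which is why the shell's connect string lists the pods in a different order on each call.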
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.RDQpVvZg7Q ++ mktemp + local LAST_ERR=/tmp/tmp.tWqQ6v3p86 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RDQpVvZg7Q + cat /tmp/tmp.tWqQ6v3p86 + rm /tmp/tmp.RDQpVvZg7Q /tmp/tmp.tWqQ6v3p86 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-major-rs0.yml + log 'compare_kubectl: statefulset/version-service-major-rs0 OK' + set +o xtrace [2025-12-04T22:19:04+0000] compare_kubectl: statefulset/version-service-major-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.ipIM7oBuev ++ mktemp + local LAST_ERR=/tmp/tmp.3dEM5aN8Q1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ipIM7oBuev perconaservermongodb.psmdb.percona.com "version-service-major" deleted from version-service-7342 namespace + cat /tmp/tmp.3dEM5aN8Q1 + rm /tmp/tmp.ipIM7oBuev /tmp/tmp.3dEM5aN8Q1 + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.SG4BFt7neo +++ mktemp ++ local LAST_ERR=/tmp/tmp.WkLd5ZlwVv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SG4BFt7neo ++ cat /tmp/tmp.WkLd5ZlwVv ++ rm /tmp/tmp.SG4BFt7neo /tmp/tmp.WkLd5ZlwVv ++ return 0 + kubectl delete pod -n psmdb-operator 
+ kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-xpn4q pod "percona-server-mongodb-operator-894779dd9-xpn4q" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-unreachable' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-unreachable ----------------------------------------------------------------------------------- + cluster=version-service-unreachable + expected_image=perconalab/percona-server-mongodb-operator:main-mongod8.0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.zoJQm0nrtq ++ mktemp + local LAST_ERR=/tmp/tmp.GbbOb7kQz7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zoJQm0nrtq secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.GbbOb7kQz7 Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.zoJQm0nrtq /tmp/tmp.GbbOb7kQz7 + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.yZS8il8Kdb + sed s%#initImage%perconalab/percona-server-mongodb-operator:PR-1961-5b0cc669%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/conf/version-service-unreachable-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "percona/pmm-client:2.44.1-1" | .spec.backup.image = "percona/percona-backup-mongodb:2.11.0"' /tmp/tmp.yZS8il8Kdb + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.lxqAcxTV5E ++ mktemp + local LAST_ERR=/tmp/tmp.dFtTwguu2q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lxqAcxTV5E perconaservermongodb.psmdb.percona.com/version-service-unreachable created + cat /tmp/tmp.dFtTwguu2q + rm /tmp/tmp.lxqAcxTV5E /tmp/tmp.dFtTwguu2q + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-unreachable-rs0 3 + local name=version-service-unreachable-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace
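The dotted 'waiting for pod/... to be ready' lines that follow come from wait_pod, which polls the pod's Ready condition and prints a dot per attempt. A stripped-down version of that polling loop (the retry cap shown is an assumption; the real helper's timeout may differ):

    wait_pod() {
        local pod=$1 retries=0
        echo -n "waiting for pod/$pod to be ready"
        until [ "$(kubectl get pod "$pod" \
            -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}')" == "True" ]; do
            echo -n .
            sleep 1
            retries=$((retries + 1))
            [ "$retries" -ge 360 ] && { echo " timeout"; return 1; }
        done
        echo OK
    }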
+ local rs_name=rs0 + local cluster_name=version-service-unreachable ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-unreachable-rs0-0 + local pod=version-service-unreachable-rs0-0 + set +o xtrace waiting for pod/version-service-unreachable-rs0-0 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-unreachable-rs0-1 + local pod=version-service-unreachable-rs0-1 + set +o xtrace waiting for pod/version-service-unreachable-rs0-1 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Bycg2n6tHp +++ mktemp ++ local LAST_ERR=/tmp/tmp.KG37K1vt1b ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Bycg2n6tHp ++ cat /tmp/tmp.KG37K1vt1b ++ rm /tmp/tmp.Bycg2n6tHp /tmp/tmp.KG37K1vt1b ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-unreachable-rs0-2 + local pod=version-service-unreachable-rs0-2 + set +o xtrace waiting for pod/version-service-unreachable-rs0-2 to be ready..........OK ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0reoEDhOCN +++ mktemp ++ local LAST_ERR=/tmp/tmp.IjDvIkzNCI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0reoEDhOCN ++ cat /tmp/tmp.IjDvIkzNCI ++ rm /tmp/tmp.0reoEDhOCN /tmp/tmp.IjDvIkzNCI ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5rzl3wOkPm +++ mktemp ++ local LAST_ERR=/tmp/tmp.s5Lk4VFRTU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5rzl3wOkPm ++ cat /tmp/tmp.s5Lk4VFRTU ++ rm /tmp/tmp.5rzl3wOkPm /tmp/tmp.s5Lk4VFRTU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.jbwN0ZjcFf ++ mktemp + local LAST_ERR=/tmp/tmp.Q8Tm8mOziw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jbwN0ZjcFf + cat /tmp/tmp.Q8Tm8mOziw + rm /tmp/tmp.jbwN0ZjcFf /tmp/tmp.Q8Tm8mOziw + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + log 'compare_kubectl: statefulset/version-service-unreachable-rs0 OK' + set +o xtrace [2025-12-04T22:21:16+0000] compare_kubectl: statefulset/version-service-unreachable-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7342 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wKwKjBt9jA +++ mktemp ++ local LAST_ERR=/tmp/tmp.56cJb3qRqP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wKwKjBt9jA ++ cat /tmp/tmp.56cJb3qRqP ++ rm /tmp/tmp.wKwKjBt9jA /tmp/tmp.56cJb3qRqP ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.uZdFgZGaqr ++ mktemp + local LAST_ERR=/tmp/tmp.v7CTl16GhC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo 
mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uZdFgZGaqr Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-7342.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-7342.svc.cluster.local:27017,version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d3155954-dabc-4130-b0b8-396cbf2fb4ea") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.v7CTl16GhC + rm /tmp/tmp.uZdFgZGaqr /tmp/tmp.v7CTl16GhC + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-unreachable-rs0.version-service-7342 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-unreachable-rs0.version-service-7342 + local driver=mongodb+srv + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T9O8tmEYD2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KhWllMBh5r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T9O8tmEYD2 ++ cat /tmp/tmp.KhWllMBh5r ++ rm /tmp/tmp.T9O8tmEYD2 /tmp/tmp.KhWllMBh5r ++ return 0 + local client_container=psmdb-client-696897d69b-x5jmw + local mongo_flag= + [[ myApp:myPass@version-service-unreachable-rs0.version-service-7342 == *cfg* ]] + replica_set=rs0 + kubectl_bin exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.lRwEQ98OQD ++ mktemp + local LAST_ERR=/tmp/tmp.KrteDnQdpp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-x5jmw -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-7342.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lRwEQ98OQD Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-7342.svc.cluster.local:27017,version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-7342.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-7342.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("fa8e45a1-d86d-40a0-9407-d9af457f6c8a") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.KrteDnQdpp + rm /tmp/tmp.lRwEQ98OQD /tmp/tmp.KrteDnQdpp + return 0 + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local new_result=/tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-7342", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.QXmv44swd4 ++ mktemp + local LAST_ERR=/tmp/tmp.hnTHNQIODf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QXmv44swd4 + cat /tmp/tmp.hnTHNQIODf + rm /tmp/tmp.QXmv44swd4 /tmp/tmp.hnTHNQIODf + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.4XGlLxXGvd/statefulset_version-service-unreachable-rs0.yml + log 'compare_kubectl: statefulset/version-service-unreachable-rs0 OK' + set +o xtrace [2025-12-04T22:21:22+0000] compare_kubectl: statefulset/version-service-unreachable-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod8.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod8.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod8.0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod8.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod8.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod8.0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=perconalab/percona-server-mongodb-operator:main-mongod8.0 + '[' perconalab/percona-server-mongodb-operator:main-mongod8.0 '!=' perconalab/percona-server-mongodb-operator:main-mongod8.0 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.i0w7n5Kpuq ++ mktemp + local LAST_ERR=/tmp/tmp.BrDrR6dEmN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i0w7n5Kpuq perconaservermongodb.psmdb.percona.com "version-service-unreachable" deleted from version-service-7342 namespace + cat /tmp/tmp.BrDrR6dEmN + rm /tmp/tmp.i0w7n5Kpuq /tmp/tmp.BrDrR6dEmN + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.PY9OLfJPVR +++ mktemp ++ local LAST_ERR=/tmp/tmp.6PjwLNVDjn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 
'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PY9OLfJPVR ++ cat /tmp/tmp.6PjwLNVDjn ++ rm /tmp/tmp.PY9OLfJPVR /tmp/tmp.6PjwLNVDjn ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-894779dd9-srkjf pod "percona-server-mongodb-operator-894779dd9-srkjf" deleted from psmdb-operator namespace + sleep 10 + destroy version-service-7342 + local namespace=version-service-7342 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.VudnTUWa0A +++ mktemp ++ local LAST_ERR=/tmp/tmp.IZYSnMZaZB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VudnTUWa0A ++ cat /tmp/tmp.IZYSnMZaZB No resources found in version-service-7342 namespace. ++ rm /tmp/tmp.VudnTUWa0A /tmp/tmp.IZYSnMZaZB ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.OJK6oei8mK ++ mktemp + local LAST_ERR=/tmp/tmp.mmipgiqmPR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OJK6oei8mK customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.mmipgiqmPR + rm /tmp/tmp.OJK6oei8mK /tmp/tmp.mmipgiqmPR + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + :
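Two things are worth calling out in the delete_crd trace. First, the 'stray \ before -' warnings come from grep -v '\-\-\-': inside single quotes the backslashes reach grep literally, and escaping '-' is meaningless, so grep warns; grep -v -- '---' (or grep -v '^---$') expresses the same intent cleanly. Second, the finalizer-clearing pipeline feeds '<namespace> <name>' pairs to a shell via xargs, and because plain xargs still runs once on empty input, a failed kubectl get produces the bogus 'kubectl patch ... -n sh' call seen above ($0 falls back to 'sh'); with GNU xargs, -r skips that run. A sketch of the loop body for one CRD with those two fixes applied (the || : mirrors the script's '+ :' no-op fallback):

    crd=perconaservermongodbbackups.psmdb.percona.com
    # each input line is "<namespace> <name> ..."; the spawned sh sees them as $0 and $1
    kubectl get "$crd" --all-namespaces -o wide | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc \
            "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" || :

Emptying metadata.finalizers on every leftover object is what lets the --wait=false CRD deletion above actually complete instead of hanging on stuck custom resources.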
"perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XX24JoECk2 ++ mktemp + local LAST_ERR=/tmp/tmp.1eHcane3H6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XX24JoECk2 + cat /tmp/tmp.1eHcane3H6 + rm /tmp/tmp.XX24JoECk2 /tmp/tmp.1eHcane3H6 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.kuBmnUe6yy ++ mktemp + local LAST_ERR=/tmp/tmp.r9q89fkWeO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kuBmnUe6yy + cat /tmp/tmp.r9q89fkWeO + rm /tmp/tmp.kuBmnUe6yy /tmp/tmp.r9q89fkWeO + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.eWbjmqSyBp ++ mktemp + local LAST_ERR=/tmp/tmp.8UFcxpdaAE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eWbjmqSyBp + cat /tmp/tmp.8UFcxpdaAE + rm /tmp/tmp.eWbjmqSyBp /tmp/tmp.8UFcxpdaAE + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Z6iMFM8sm8 ++ mktemp + local LAST_ERR=/tmp/tmp.69eUokSEyV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1961/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z6iMFM8sm8 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.69eUokSEyV + rm /tmp/tmp.Z6iMFM8sm8 
/tmp/tmp.69eUokSEyV + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TOYVLwsNFk ++ mktemp + local LAST_ERR=/tmp/tmp.2DOt9ROjKo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TOYVLwsNFk + cat /tmp/tmp.2DOt9ROjKo Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TOYVLwsNFk + cat /tmp/tmp.2DOt9ROjKo Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": 
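Every delete attempt here fails the same way: the cert-manager manifest is deleted against a cluster where none of its objects exist, and a plain "kubectl delete -f" reports NotFound for each missing resource instead of succeeding, so the retry wrapper keeps re-running a delete that can never pass. Below is a minimal sketch of an idempotent variant, assuming the same manifest URL as in the trace; this is not the test framework's actual cleanup code.

# Hedged sketch: --ignore-not-found turns deletion of absent resources into a
# no-op, so an already-clean cluster produces no errors and no retries.
kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml \
    --ignore-not-found=true --wait=false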
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.TOYVLwsNFk
+ cat /tmp/tmp.2DOt9ROjKo
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.TOYVLwsNFk + cat /tmp/tmp.2DOt9ROjKo Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts 
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io 
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error 
+ rm /tmp/tmp.TOYVLwsNFk /tmp/tmp.2DOt9ROjKo
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace version-service-7342
+ rm -rf /tmp/tmp.4XGlLxXGvd
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.nyxh10qlFm
+ local LAST_OUT=/tmp/tmp.DY5TnqO3Lb
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.UjLhQ84DRX
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.WxtetLZqVG
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ for i in $(seq 0 2)
+ kubectl delete --grace-period=0 --force=true namespace version-service-7342
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
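Teardown ends by force-deleting the two namespaces this run created. A short sketch of that step, hedged in the same way: the namespace names are taken from this run, and --grace-period=0 with --force skips graceful pod termination, which is usually acceptable for CI cleanup.

# Hedged sketch: forced removal of the namespaces left over from the test run.
for ns in version-service-7342 psmdb-operator; do
    kubectl delete namespace "$ns" --grace-period=0 --force=true --ignore-not-found
done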