Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/logs/version-service.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + create_infra version-service-13414 + local ns=version-service-13414 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.GnpWd62Ghe ++ mktemp + local LAST_ERR=/tmp/tmp.sXkbTGyoxq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GnpWd62Ghe customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.sXkbTGyoxq + rm /tmp/tmp.GnpWd62Ghe /tmp/tmp.sXkbTGyoxq + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.qRK8QLX9oD ++ mktemp + local LAST_ERR=/tmp/tmp.R7LpRi7Guv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qRK8QLX9oD + cat /tmp/tmp.R7LpRi7Guv + rm /tmp/tmp.qRK8QLX9oD /tmp/tmp.R7LpRi7Guv + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server 
doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.3m0VDoRMJO ++ mktemp + local LAST_ERR=/tmp/tmp.5g86AdAOnd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3m0VDoRMJO + cat /tmp/tmp.5g86AdAOnd + rm /tmp/tmp.3m0VDoRMJO /tmp/tmp.5g86AdAOnd + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0FlxfJJhv2 ++ mktemp + local LAST_ERR=/tmp/tmp.veL1OetvZj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0FlxfJJhv2 + cat /tmp/tmp.veL1OetvZj + rm /tmp/tmp.0FlxfJJhv2 /tmp/tmp.veL1OetvZj + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.qP4WG7IExW ++ mktemp + local LAST_ERR=/tmp/tmp.pZ60QN8jkm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qP4WG7IExW clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.pZ60QN8jkm + rm /tmp/tmp.qP4WG7IExW /tmp/tmp.pZ60QN8jkm + return 0 + check_crd_for_deletion PR-2219-d7e802db + local git_tag=PR-2219-d7e802db ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2219-d7e802db/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zhRsiuq6kw +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZwtnekTgD0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zhRsiuq6kw ++ cat /tmp/tmp.ZwtnekTgD0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ 
exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zhRsiuq6kw ++ cat /tmp/tmp.ZwtnekTgD0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.zhRsiuq6kw ++ cat /tmp/tmp.ZwtnekTgD0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.zhRsiuq6kw ++ cat /tmp/tmp.ZwtnekTgD0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.zhRsiuq6kw /tmp/tmp.ZwtnekTgD0 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' ++ mktemp + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.DXRZDC20Ti ++ mktemp + local LAST_OUT=/tmp/tmp.MrAiThRTrT ++ mktemp + local LAST_ERR=/tmp/tmp.aJyX6DME8D + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.ta9R4KEWPx + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + for i in $(seq 0 2) + kubectl get ns + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DXRZDC20Ti + cat /tmp/tmp.aJyX6DME8D + rm /tmp/tmp.DXRZDC20Ti /tmp/tmp.aJyX6DME8D + return 0 namespace "version-service-26391" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MrAiThRTrT namespace "psmdb-operator" deleted + cat /tmp/tmp.ta9R4KEWPx + rm /tmp/tmp.MrAiThRTrT /tmp/tmp.ta9R4KEWPx + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.0DxWOLXyuw ++ mktemp + local LAST_ERR=/tmp/tmp.RAw7xRYFAe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0DxWOLXyuw + cat /tmp/tmp.RAw7xRYFAe + rm /tmp/tmp.0DxWOLXyuw /tmp/tmp.RAw7xRYFAe + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fZKt0vgWYh ++ mktemp + local LAST_ERR=/tmp/tmp.34wV6pj5wd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fZKt0vgWYh namespace/psmdb-operator created + cat /tmp/tmp.34wV6pj5wd + rm /tmp/tmp.fZKt0vgWYh /tmp/tmp.34wV6pj5wd + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.qkLrDjmxz3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Gr2jkehhUE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qkLrDjmxz3 ++ cat /tmp/tmp.Gr2jkehhUE ++ rm /tmp/tmp.qkLrDjmxz3 /tmp/tmp.Gr2jkehhUE ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster7 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.OvwFB3fRkR ++ mktemp + local LAST_ERR=/tmp/tmp.U9SpgA1mLe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster7 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OvwFB3fRkR Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster7" modified. 
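Every kubectl call in this log runs through the test harness's kubectl_bin wrapper, which is why the same mktemp / LAST_OUT / LAST_ERR / seq 0 2 pattern repeats throughout the trace. A minimal sketch of that wrapper, reconstructed from this trace alone (the real helper in the e2e test framework may differ in detail):

kubectl_bin() {
    # Capture stdout/stderr of each attempt in temp files and retry up to 3 times.
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))   # observed back-offs in this log: 0s, 4s, 8s
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}
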
+ cat /tmp/tmp.U9SpgA1mLe + rm /tmp/tmp.OvwFB3fRkR /tmp/tmp.U9SpgA1mLe + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.S0F7p9lpya ++ mktemp + local LAST_ERR=/tmp/tmp.XNq9H2GQZB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S0F7p9lpya customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.XNq9H2GQZB + rm /tmp/tmp.S0F7p9lpya /tmp/tmp.XNq9H2GQZB + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.0pEDPJLVun ++ mktemp + local LAST_ERR=/tmp/tmp.kRKu9gj3iB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0pEDPJLVun clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.kRKu9gj3iB + rm /tmp/tmp.0pEDPJLVun /tmp/tmp.kRKu9gj3iB + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ijjLn4SWjR ++ mktemp + local LAST_ERR=/tmp/tmp.CKcri1QN6M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ijjLn4SWjR deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.CKcri1QN6M + rm /tmp/tmp.ijjLn4SWjR /tmp/tmp.CKcri1QN6M + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.3tY2iq5Jow +++ mktemp ++ local LAST_ERR=/tmp/tmp.ojQh9tSvlP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3tY2iq5Jow ++ cat /tmp/tmp.ojQh9tSvlP ++ rm /tmp/tmp.3tY2iq5Jow /tmp/tmp.ojQh9tSvlP ++ return 0 + wait_operator_pod percona-server-mongodb-operator-76758dcf9-9fzbb + local pod=percona-server-mongodb-operator-76758dcf9-9fzbb + set +o xtrace waiting for pod/percona-server-mongodb-operator-76758dcf9-9fzbb to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.HBxPCVIQbx +++ mktemp ++ local LAST_ERR=/tmp/tmp.ylbfUP3Ers ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HBxPCVIQbx ++ cat /tmp/tmp.ylbfUP3Ers ++ rm /tmp/tmp.HBxPCVIQbx /tmp/tmp.ylbfUP3Ers ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-76758dcf9-9fzbb ++ mktemp + local LAST_OUT=/tmp/tmp.3M4nBhytg5 ++ mktemp + local LAST_ERR=/tmp/tmp.UkKFXfzgPp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-76758dcf9-9fzbb + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3M4nBhytg5 + cat /tmp/tmp.UkKFXfzgPp + rm /tmp/tmp.3M4nBhytg5 /tmp/tmp.UkKFXfzgPp + return 0 2026-03-11T00:00:40.160Z INFO setup Manager starting up {"gitCommit": "d7e802db10c9b2b2028f56c7b5227cb276fe5878", "gitBranch": "PR-2219-d7e802db", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace version-service-13414 + local namespace=version-service-13414 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' ++ mktemp + '[' -n '' ']' + desc 'cleaned up old namespaces version-service-13414' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces version-service-13414 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace version-service-13414 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.fgsNGqiyem ++ mktemp + local LAST_OUT=/tmp/tmp.hOLo5rWF2c + local LAST_ERR=/tmp/tmp.E6jmlyjFwe + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.jhQih1VZxc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace version-service-13414 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fgsNGqiyem + cat /tmp/tmp.E6jmlyjFwe + rm /tmp/tmp.fgsNGqiyem /tmp/tmp.E6jmlyjFwe + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hOLo5rWF2c + cat /tmp/tmp.jhQih1VZxc + rm /tmp/tmp.hOLo5rWF2c /tmp/tmp.jhQih1VZxc + return 0 + kubectl_bin wait --for=delete namespace version-service-13414 ++ mktemp + local LAST_OUT=/tmp/tmp.qjfuZIQ0N1 ++ mktemp + local LAST_ERR=/tmp/tmp.UUxxWUokpw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace version-service-13414 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qjfuZIQ0N1 + cat /tmp/tmp.UUxxWUokpw + rm /tmp/tmp.qjfuZIQ0N1 /tmp/tmp.UUxxWUokpw + return 0 + desc 'create namespace version-service-13414' + set +o xtrace ----------------------------------------------------------------------------------- create namespace version-service-13414 ----------------------------------------------------------------------------------- + kubectl_bin create namespace version-service-13414 ++ mktemp + local 
LAST_OUT=/tmp/tmp.4xR4bOevHF ++ mktemp + local LAST_ERR=/tmp/tmp.ZXkAkbTj5G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace version-service-13414 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4xR4bOevHF namespace/version-service-13414 created + cat /tmp/tmp.ZXkAkbTj5G + rm /tmp/tmp.4xR4bOevHF /tmp/tmp.ZXkAkbTj5G + return 0 + set_kube_ctx version-service-13414 + local namespace=version-service-13414 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.U1tZU6mB0S +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Bv62zhsUi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U1tZU6mB0S ++ cat /tmp/tmp.4Bv62zhsUi ++ rm /tmp/tmp.U1tZU6mB0S /tmp/tmp.4Bv62zhsUi ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster7 --namespace=version-service-13414 ++ mktemp + local LAST_OUT=/tmp/tmp.nsyf084TiB ++ mktemp + local LAST_ERR=/tmp/tmp.Xo2WZw5vXY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster7 --namespace=version-service-13414 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nsyf084TiB Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster7" modified. + cat /tmp/tmp.Xo2WZw5vXY + rm /tmp/tmp.nsyf084TiB /tmp/tmp.Xo2WZw5vXY + return 0 + desc 'install version service' + set +o xtrace ----------------------------------------------------------------------------------- install version service ----------------------------------------------------------------------------------- + kubectl_bin create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json ++ mktemp + local LAST_OUT=/tmp/tmp.ZtAf77SrfP ++ mktemp + local LAST_ERR=/tmp/tmp.EDabvDPxbp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create configmap -n psmdb-operator versions --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.dep.json --from-file /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/operator.9.9.9.psmdb-operator.json + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZtAf77SrfP configmap/versions created + cat /tmp/tmp.EDabvDPxbp + rm /tmp/tmp.ZtAf77SrfP /tmp/tmp.EDabvDPxbp + return 0 + kubectl_bin apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/vs.yml ++ mktemp + local LAST_OUT=/tmp/tmp.tAOTXHE1Xv ++ mktemp + local LAST_ERR=/tmp/tmp.M3X6kYxg90 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/vs.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tAOTXHE1Xv deployment.apps/version-service created service/version-service created + cat /tmp/tmp.M3X6kYxg90 + rm /tmp/tmp.tAOTXHE1Xv /tmp/tmp.M3X6kYxg90 + return 0 
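At this point the operator is running cluster-wide in psmdb-operator and a local version service has been installed from the versions configmap and vs.yml. Condensed from the trace that follows: the harness derives a second copy of the service named version-service-cr and points the operator at the in-cluster endpoint (src_dir stands for the checked-out repo path shown in the full commands):

src_dir=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219

# Rewrite every string ending in "version-service" to "version-service-cr"
# and apply the result as a second deployment/service.
yq eval '(.. | select(tag == "!!str")) |= sub("version-service$", "version-service-cr")' \
    "${src_dir}/e2e-tests/version-service/conf/vs.yml" \
    | kubectl apply -n psmdb-operator -f -

# Point the operator at the in-cluster version service.
kubectl -n psmdb-operator set env deploy/percona-server-mongodb-operator \
    PERCONA_VS_FALLBACK_URI=http://version-service:11000
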
+ sleep 10 + yq eval '(.. | select(tag == "!!str")) |= sub("version-service$", "version-service-cr")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/vs.yml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Dfd80H1xDy ++ mktemp + local LAST_ERR=/tmp/tmp.GQLxDuXOY5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Dfd80H1xDy deployment.apps/version-service-cr created service/version-service-cr created + cat /tmp/tmp.GQLxDuXOY5 + rm /tmp/tmp.Dfd80H1xDy /tmp/tmp.GQLxDuXOY5 + return 0 + kubectl_bin -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000 ++ mktemp + local LAST_OUT=/tmp/tmp.cr2tTdvhsO ++ mktemp + local LAST_ERR=/tmp/tmp.8szxiu5cFT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n psmdb-operator set env deploy/percona-server-mongodb-operator PERCONA_VS_FALLBACK_URI=http://version-service:11000 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cr2tTdvhsO deployment.apps/percona-server-mongodb-operator env updated + cat /tmp/tmp.8szxiu5cFT + rm /tmp/tmp.cr2tTdvhsO /tmp/tmp.8szxiu5cFT + return 0 + sleep 30 + desc 'enable telemetry on operator level' + set +o xtrace ----------------------------------------------------------------------------------- enable telemetry on operator level ----------------------------------------------------------------------------------- + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "false"' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.bOzJgA9Wb1 + local LAST_OUT=/tmp/tmp.qwbDVTpBtz ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.5GRcn0cCax + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.IVg1SB2RaO + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qwbDVTpBtz + cat /tmp/tmp.IVg1SB2RaO + rm /tmp/tmp.qwbDVTpBtz /tmp/tmp.IVg1SB2RaO + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bOzJgA9Wb1 deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.5GRcn0cCax + rm /tmp/tmp.bOzJgA9Wb1 /tmp/tmp.5GRcn0cCax + return 0 + wait_deployment percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.t3Bf6xr5v6 ++ mktemp + local LAST_ERR=/tmp/tmp.lgyA85YKlC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.t3Bf6xr5v6 + cat /tmp/tmp.lgyA85YKlC + rm /tmp/tmp.t3Bf6xr5v6 /tmp/tmp.lgyA85YKlC + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp 
++ local LAST_OUT=/tmp/tmp.mVTPylmncP +++ mktemp ++ local LAST_ERR=/tmp/tmp.wyG1nj2K2c ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mVTPylmncP ++ cat /tmp/tmp.wyG1nj2K2c ++ rm /tmp/tmp.mVTPylmncP /tmp/tmp.wyG1nj2K2c ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.IIsxkbzFj7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PA5Sm64TdC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IIsxkbzFj7 ++ cat /tmp/tmp.PA5Sm64TdC ++ rm /tmp/tmp.IIsxkbzFj7 /tmp/tmp.PA5Sm64TdC ++ return 0 + '[' 1 == 1 ']' + check_telemetry_transfer http://version-service-cr:11000 disabled enabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=disabled + local telemetry_state=enabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.ZiABAm4vas ++ mktemp + local LAST_ERR=/tmp/tmp.Q8eWwo5e4m + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZiABAm4vas deployment.apps/psmdb-client created + cat /tmp/tmp.Q8eWwo5e4m + rm /tmp/tmp.ZiABAm4vas /tmp/tmp.Q8eWwo5e4m + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Qp00bIdbGd ++ mktemp + local LAST_ERR=/tmp/tmp.GyyYn63sVI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qp00bIdbGd secret/minimal-cluster created + cat /tmp/tmp.GyyYn63sVI + rm /tmp/tmp.Qp00bIdbGd /tmp/tmp.GyyYn63sVI + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "disabled" | .spec.initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db" | .spec.crVersion = "9.9.9" | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' 
/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cr-minimal.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ISsyP1IygY ++ mktemp + local LAST_ERR=/tmp/tmp.3Hti8JhH72 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ISsyP1IygY perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.3Hti8JhH72 + rm /tmp/tmp.ISsyP1IygY /tmp/tmp.3Hti8JhH72 + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k0bjvU26yC +++ mktemp ++ local LAST_ERR=/tmp/tmp.i0FIuZwuvH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k0bjvU26yC ++ cat /tmp/tmp.i0FIuZwuvH ++ rm /tmp/tmp.k0bjvU26yC /tmp/tmp.i0FIuZwuvH ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready.................OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wohLlt7Zwz +++ mktemp ++ local LAST_ERR=/tmp/tmp.s6eKVaq4gK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wohLlt7Zwz ++ cat /tmp/tmp.s6eKVaq4gK ++ rm /tmp/tmp.wohLlt7Zwz /tmp/tmp.s6eKVaq4gK ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uwj1f1Txqa +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yph1moUDgr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Uwj1f1Txqa ++ cat /tmp/tmp.Yph1moUDgr ++ rm /tmp/tmp.Uwj1f1Txqa /tmp/tmp.Yph1moUDgr ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................ 
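The user creation and data write that follow go through the run_mongo helper: it resolves the psmdb-client pod and pipes the command into the mongo shell over an SRV connection string. A condensed sketch based on the expanded commands visible in this trace (the real helper may accept extra flags):

run_mongo() {
    local command="$1"   # e.g. 'db.createUser({...})'
    local uri="$2"       # e.g. userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414
    local suffix=".svc.cluster.local"
    local replica_set="rs0"

    # Find the client pod deployed from e2e-tests/conf/client.yml.
    local client_container
    client_container=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')

    # Pipe the command into mongo inside the client pod.
    kubectl exec "$client_container" -- bash -c \
        "printf '${command}\n' | mongo mongodb+srv://${uri}${suffix}/admin?ssl=false\&replicaSet=${replica_set}"
}
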
+ sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XvjXoWkrXn +++ mktemp ++ local LAST_ERR=/tmp/tmp.lIQjajbbv8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XvjXoWkrXn ++ cat /tmp/tmp.lIQjajbbv8 ++ rm /tmp/tmp.XvjXoWkrXn /tmp/tmp.lIQjajbbv8 ++ return 0 + local client_container=psmdb-client-bb8b97679-wbx82 + kubectl_bin exec psmdb-client-bb8b97679-wbx82 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.LzM3YKotnz ++ mktemp + local LAST_ERR=/tmp/tmp.7A8WPbMKL0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-wbx82 -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LzM3YKotnz Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("a6024d49-aa11-4bee-b93f-6331e409db4b") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.7A8WPbMKL0 + rm /tmp/tmp.LzM3YKotnz /tmp/tmp.7A8WPbMKL0 + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@minimal-cluster-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin 
get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UsqFQqzQVw +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZaWoiORXKH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UsqFQqzQVw ++ cat /tmp/tmp.ZaWoiORXKH ++ rm /tmp/tmp.UsqFQqzQVw /tmp/tmp.ZaWoiORXKH ++ return 0 + local client_container=psmdb-client-bb8b97679-wbx82 + kubectl_bin exec psmdb-client-bb8b97679-wbx82 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UdqCXkBI3p ++ mktemp + local LAST_ERR=/tmp/tmp.MEZKXWkRnw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-wbx82 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UdqCXkBI3p Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ad606336-c959-40f3-aa31-a9c1155a4936") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.MEZKXWkRnw + rm /tmp/tmp.UdqCXkBI3p /tmp/tmp.MEZKXWkRnw + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-7db489d9c4-wlrdk -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.o4aic5arBi ++ mktemp + local LAST_ERR=/tmp/tmp.bKkVUuxhfN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-7db489d9c4-wlrdk -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o4aic5arBi + cat /tmp/tmp.bKkVUuxhfN + rm /tmp/tmp.o4aic5arBi /tmp/tmp.bKkVUuxhfN + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-96477994-ckfqh -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.CK4P4mPVQf ++ mktemp + local LAST_ERR=/tmp/tmp.KScXnR9Dcy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + 
kubectl logs version-service-96477994-ckfqh -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CK4P4mPVQf + cat /tmp/tmp.KScXnR9Dcy + rm /tmp/tmp.CK4P4mPVQf /tmp/tmp.KScXnR9Dcy + return 0 + local telemetry_log_file=enabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a enabled == enabled ']' + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json /dev/fd/63 ++ grep -f /tmp/tmp.mfX3wGCZe1/enabled_telemetry.version-service.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/enabled_telemetry.version-service.log-cw.json + [[ -s /tmp/tmp.mfX3wGCZe1/enabled_telemetry.version-service-cr.log.json ]] + local telemetry_cr_log_file=enabled_telemetry.version-service-cr.log-cw.json + local image_prefix=disabled + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' disabled == disabled-recommended -a enabled == disabled ']' + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a enabled == disabled ']' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.weyKgM5BSh +++ mktemp ++ local LAST_ERR=/tmp/tmp.k8Pq6cu9NM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.weyKgM5BSh ++ cat /tmp/tmp.k8Pq6cu9NM ++ rm /tmp/tmp.weyKgM5BSh /tmp/tmp.k8Pq6cu9NM ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-7bbbc5d9df-cb9rc ++ mktemp + local LAST_OUT=/tmp/tmp.3jajkUS5MP ++ mktemp + local LAST_ERR=/tmp/tmp.3HCDfaYfG9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-7bbbc5d9df-cb9rc + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3jajkUS5MP pod "percona-server-mongodb-operator-7bbbc5d9df-cb9rc" deleted from psmdb-operator namespace + cat /tmp/tmp.3HCDfaYfG9 + rm /tmp/tmp.3jajkUS5MP /tmp/tmp.3HCDfaYfG9 + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.De8AgHQ1g5 ++ mktemp + local LAST_ERR=/tmp/tmp.YammEnArYF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p 
'{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.De8AgHQ1g5 perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.YammEnArYF + rm /tmp/tmp.De8AgHQ1g5 /tmp/tmp.YammEnArYF + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.5d6K5iERMj ++ mktemp + local LAST_ERR=/tmp/tmp.nWU9vxPwbY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5d6K5iERMj perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-13414 namespace + cat /tmp/tmp.nWU9vxPwbY + rm /tmp/tmp.5d6K5iERMj /tmp/tmp.nWU9vxPwbY + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.Udt2avlGCY ++ mktemp + local LAST_ERR=/tmp/tmp.lRssYTNdYM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Udt2avlGCY deployment.apps "psmdb-client" deleted from version-service-13414 namespace + cat /tmp/tmp.lRssYTNdYM + rm /tmp/tmp.Udt2avlGCY /tmp/tmp.lRssYTNdYM + return 0 + sleep 30 + desc 'disabling telemetry on the operator level' + set +o xtrace ----------------------------------------------------------------------------------- disabling telemetry on the operator level ----------------------------------------------------------------------------------- + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.tiIVcvVaiS ++ mktemp + local LAST_ERR=/tmp/tmp.P4Nui2O72Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tiIVcvVaiS pod "version-service-cr-7db489d9c4-wlrdk" deleted from psmdb-operator namespace + cat /tmp/tmp.P4Nui2O72Q + rm /tmp/tmp.tiIVcvVaiS /tmp/tmp.P4Nui2O72Q + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.aEvNpQd7iR ++ mktemp + local LAST_ERR=/tmp/tmp.ttzmbT0Z2N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aEvNpQd7iR pod "version-service-96477994-ckfqh" deleted from psmdb-operator namespace + cat /tmp/tmp.ttzmbT0Z2N + rm /tmp/tmp.aEvNpQd7iR /tmp/tmp.ttzmbT0Z2N + return 0 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.3FoiQWhPyF + local LAST_OUT=/tmp/tmp.acoc7ZJ8Cl ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.mLgZHxlZIV + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.96aWIrSYiv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3FoiQWhPyF + cat /tmp/tmp.mLgZHxlZIV + rm /tmp/tmp.3FoiQWhPyF 
/tmp/tmp.mLgZHxlZIV + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.acoc7ZJ8Cl deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.96aWIrSYiv + rm /tmp/tmp.acoc7ZJ8Cl /tmp/tmp.96aWIrSYiv + return 0 ++ get_mongod_ver_from_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 ++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ run_simple_cli_inside_image docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 'mongod --version' +++ local image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 +++ local 'cli=mongod --version' +++ local pod_name=5341 +++ kubectl_bin -n default run 5341 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ /usr/sbin/sed -r 's/^.*db version v(([0-9]+\.){2}[0-9]+-[0-9]+).*$/\1/g' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wJ6QCtbB6o ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wgdFMrKf3k +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default run 5341 --image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 --restart=Never --command -- sleep infinity +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wJ6QCtbB6o +++ cat /tmp/tmp.wgdFMrKf3k +++ rm /tmp/tmp.wJ6QCtbB6o /tmp/tmp.wgdFMrKf3k +++ return 0 +++ kubectl_bin -n default wait --for=condition=Ready pod/5341 ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mNsXYTmuoM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IcXa0HADjU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default wait --for=condition=Ready pod/5341 +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mNsXYTmuoM +++ cat /tmp/tmp.IcXa0HADjU +++ rm /tmp/tmp.mNsXYTmuoM /tmp/tmp.IcXa0HADjU +++ return 0 ++++ kubectl_bin -n default exec 5341 -- bash -c 'mongod --version 2>&1' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.4JT9R6vnrv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.TDKYBcBrzi ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl -n default exec 5341 -- bash -c 'mongod --version 2>&1' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.4JT9R6vnrv ++++ cat /tmp/tmp.TDKYBcBrzi ++++ rm /tmp/tmp.4JT9R6vnrv /tmp/tmp.TDKYBcBrzi ++++ return 0 +++ local 'output=db version v8.0.19-7 Build Info: { "version": "8.0.19-7", "gitVersion": "902b12dbea631904a8175499a56c8bb95b708483", "openSSLVersion": "OpenSSL 3.5.1 1 Jul 2025", "modules": [], "perconaFeatures": [ "MemoryEngine", "HotBackup", "BackupCursorAggregationStage", "BackupCursorExtendAggregationStage", "AWSIAM", "Kerberos", "LDAP", "OIDC", "TDE", "FIPSMode", "FCBIS", "Auditing", "ProfilingRateLimit", "LogRedaction", "ngram" ], "allocator": "tcmalloc-google", "environment": { "distarch": "x86_64", "target_arch": "x86_64" } }' +++ kubectl_bin -n default delete pod/5341 --grace-period=0 --force ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CDO2IhFeey ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xcYnI9a5sc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl -n default delete pod/5341 --grace-period=0 --force +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CDO2IhFeey +++ cat /tmp/tmp.xcYnI9a5sc Warning: Immediate deletion does not wait for confirmation that 
the running resource has been terminated. The resource may continue to run on the cluster indefinitely. +++ rm /tmp/tmp.CDO2IhFeey /tmp/tmp.xcYnI9a5sc +++ return 0 +++ echo db version v8.0.19-7 Build Info: '{' '"version":' '"8.0.19-7",' '"gitVersion":' '"902b12dbea631904a8175499a56c8bb95b708483",' '"openSSLVersion":' '"OpenSSL' 3.5.1 1 Jul '2025",' '"modules":' '[],' '"perconaFeatures":' '[' '"MemoryEngine",' '"HotBackup",' '"BackupCursorAggregationStage",' '"BackupCursorExtendAggregationStage",' '"AWSIAM",' '"Kerberos",' '"LDAP",' '"OIDC",' '"TDE",' '"FIPSMode",' '"FCBIS",' '"Auditing",' '"ProfilingRateLimit",' '"LogRedaction",' '"ngram"' '],' '"allocator":' '"tcmalloc-google",' '"environment":' '{' '"distarch":' '"x86_64",' '"target_arch":' '"x86_64"' '}' '}' ++ version_info=8.0.19-7 ++ [[ ! 8.0.19-7 =~ ^([0-9]+\.){2}[0-9]+-[0-9]+$ ]] ++ echo 8.0.19-7 + ACTUAL_MONGOD_VERSION=8.0.19-7 + wait_deployment percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.KL1ytvxZHj ++ mktemp + local LAST_ERR=/tmp/tmp.7i6eoY9Tgw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KL1ytvxZHj + cat /tmp/tmp.7i6eoY9Tgw + rm /tmp/tmp.KL1ytvxZHj /tmp/tmp.7i6eoY9Tgw + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.PwiDyJjvSV +++ mktemp ++ local LAST_ERR=/tmp/tmp.jf7TpyyWPw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PwiDyJjvSV ++ cat /tmp/tmp.jf7TpyyWPw ++ rm /tmp/tmp.PwiDyJjvSV /tmp/tmp.jf7TpyyWPw ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.DJd2lYy27g +++ mktemp ++ local LAST_ERR=/tmp/tmp.6RdZiyAfAe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DJd2lYy27g ++ cat /tmp/tmp.6RdZiyAfAe ++ rm /tmp/tmp.DJd2lYy27g /tmp/tmp.6RdZiyAfAe ++ return 0 + '[' 1 == 1 ']' + check_telemetry_transfer http://version-service-cr:11000 8.0-recommended disabled + local cr_vs_uri=http://version-service-cr:11000 + local cr_vs_channel=8.0-recommended + local telemetry_state=disabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.IX0RwULLYD ++ mktemp + local LAST_ERR=/tmp/tmp.0z4MfYf5mm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl 
apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IX0RwULLYD deployment.apps/psmdb-client created + cat /tmp/tmp.0z4MfYf5mm + rm /tmp/tmp.IX0RwULLYD /tmp/tmp.0z4MfYf5mm + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.dUCP360ZAG ++ mktemp + local LAST_ERR=/tmp/tmp.pdCJs11uyo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dUCP360ZAG secret/minimal-cluster configured + cat /tmp/tmp.pdCJs11uyo Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.dUCP360ZAG /tmp/tmp.pdCJs11uyo + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "8.0-recommended" | .spec.initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db" | .spec.crVersion = "9.9.9" | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cr-minimal.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.3zFeD2sZO3 ++ mktemp + local LAST_ERR=/tmp/tmp.rb9DxkwoP9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3zFeD2sZO3 perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.rb9DxkwoP9 + rm /tmp/tmp.3zFeD2sZO3 /tmp/tmp.rb9DxkwoP9 + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PFifLyd3ax +++ mktemp ++ local LAST_ERR=/tmp/tmp.Z4hauCNT7e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PFifLyd3ax ++ cat 
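The minimal cluster is created from deploy/cr-minimal.yaml with the version-service settings injected on the fly rather than edited into the file. A condensed sketch of that override-and-apply pattern, assuming yq v4 syntax as used throughout this log (only a subset of the fields set above is shown):

    yq eval '
      .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" |
      .spec.upgradeOptions.apply = "8.0-recommended" |
      .spec.crVersion = "9.9.9" |
      .spec.backup.enabled = false
    ' deploy/cr-minimal.yaml | kubectl apply -f -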
/tmp/tmp.Z4hauCNT7e ++ rm /tmp/tmp.PFifLyd3ax /tmp/tmp.Z4hauCNT7e ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready......................OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qe1NcZNclu +++ mktemp ++ local LAST_ERR=/tmp/tmp.lDT8WCJjgU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qe1NcZNclu ++ cat /tmp/tmp.lDT8WCJjgU ++ rm /tmp/tmp.qe1NcZNclu /tmp/tmp.lDT8WCJjgU ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9XJ5YRq4MB +++ mktemp ++ local LAST_ERR=/tmp/tmp.M5ULht2bPa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9XJ5YRq4MB ++ cat /tmp/tmp.M5ULht2bPa ++ rm /tmp/tmp.9XJ5YRq4MB /tmp/tmp.M5ULht2bPa ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................ + sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.myHCraVqb0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.oD6Zy7JOkn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.myHCraVqb0 ++ cat /tmp/tmp.oD6Zy7JOkn ++ rm /tmp/tmp.myHCraVqb0 /tmp/tmp.oD6Zy7JOkn ++ return 0 + local client_container=psmdb-client-bb8b97679-2tj2c + kubectl_bin exec psmdb-client-bb8b97679-2tj2c -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fCJT2AjsN0 ++ mktemp + local LAST_ERR=/tmp/tmp.BsQbfF2bCb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-2tj2c -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: 
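wait_for_running only waits on arbiter, non-voting, and hidden member pods when the CR actually enables them, which is what the three jsonpath probes above decide. A minimal sketch of one such probe, with the cluster and replset names taken from this run:

    # Read the replset spec via a filtered jsonpath expression; "true" means
    # an extra wait_pod call is needed for that member type.
    arbiter_enabled=$(kubectl get psmdb minimal-cluster \
        -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}')
    if [[ "${arbiter_enabled}" == "true" ]]; then
        echo "arbiter enabled for rs0, waiting for its pod too"
    fi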
"readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fCJT2AjsN0 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("8c38744c-d093-445e-aa45-88e372472272") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.BsQbfF2bCb + rm /tmp/tmp.fCJT2AjsN0 /tmp/tmp.BsQbfF2bCb + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@minimal-cluster-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dAty7tRzhc +++ mktemp ++ local LAST_ERR=/tmp/tmp.5lX3KNNxyl ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dAty7tRzhc ++ cat /tmp/tmp.5lX3KNNxyl ++ rm /tmp/tmp.dAty7tRzhc /tmp/tmp.5lX3KNNxyl ++ return 0 + local client_container=psmdb-client-bb8b97679-2tj2c + kubectl_bin exec psmdb-client-bb8b97679-2tj2c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RDXiCb7fKr ++ mktemp + local LAST_ERR=/tmp/tmp.GZpEp3Ccvr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-2tj2c -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RDXiCb7fKr Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("abc806bd-90c2-426d-a5a7-5c7e221925c9") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.GZpEp3Ccvr + rm /tmp/tmp.RDXiCb7fKr /tmp/tmp.GZpEp3Ccvr + return 0 + desc 'check telemetry' + set +o xtrace 
----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-7db489d9c4-w5l98 -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.vQIL9lxkRz ++ mktemp + local LAST_ERR=/tmp/tmp.qFfZskbnFO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-7db489d9c4-w5l98 -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vQIL9lxkRz + cat /tmp/tmp.qFfZskbnFO + rm /tmp/tmp.vQIL9lxkRz /tmp/tmp.qFfZskbnFO + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-96477994-h78kz -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.64aZyJSSwn ++ mktemp + local LAST_ERR=/tmp/tmp.BmF8HKxwmk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-96477994-h78kz -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.64aZyJSSwn + cat /tmp/tmp.BmF8HKxwmk + rm /tmp/tmp.64aZyJSSwn /tmp/tmp.BmF8HKxwmk + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' 8.0-recommended == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=8.0 + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' 8.0-recommended == 8.0-recommended -a disabled == disabled ']' + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json /dev/fd/63 ++ grep -f /tmp/tmp.mfX3wGCZe1/disabled_telemetry.version-service-cr.log.json /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/disabled_telemetry.version-service-cr.log-cw.json + [[ -s /tmp/tmp.mfX3wGCZe1/disabled_telemetry.version-service.log.json ]] + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' 8.0-recommended == disabled -a disabled == disabled ']' ++ get_operator_pod ++ 
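The telemetry check above scrapes the version-service pods' logs and reduces every gRPC payload line to a stable JSON object so it can be compared against a stored expectation. A minimal sketch of that pipeline, where vs_pod stands for either version-service pod name resolved earlier in the trace:

    kubectl logs "${vs_pod}" -n psmdb-operator \
        | grep -E 'server request payload|unary call' \
        | grep -Eo '\{.*\}' \
        | jq 'del(."grpc.request.content".msg.customResourceUid)
              | del(."grpc.request.content".msg.kubeVersion)
              | del(."grpc.start_time")
              | del(."grpc.time_ms")'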
kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.lBXOmEjMLw +++ mktemp ++ local LAST_ERR=/tmp/tmp.jckV9MvBrC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lBXOmEjMLw ++ cat /tmp/tmp.jckV9MvBrC ++ rm /tmp/tmp.lBXOmEjMLw /tmp/tmp.jckV9MvBrC ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-6z8q6 ++ mktemp + local LAST_OUT=/tmp/tmp.Wk0KvPefK4 ++ mktemp + local LAST_ERR=/tmp/tmp.iqdmDeZzJk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-6z8q6 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Wk0KvPefK4 pod "percona-server-mongodb-operator-5bd97c4cbd-6z8q6" deleted from psmdb-operator namespace + cat /tmp/tmp.iqdmDeZzJk + rm /tmp/tmp.Wk0KvPefK4 /tmp/tmp.iqdmDeZzJk + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.QlFXxB22fv ++ mktemp + local LAST_ERR=/tmp/tmp.WEhlfYTd1j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QlFXxB22fv perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.WEhlfYTd1j + rm /tmp/tmp.QlFXxB22fv /tmp/tmp.WEhlfYTd1j + return 0 + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.dzT8BjLHqG ++ mktemp + local LAST_ERR=/tmp/tmp.O4WP8tah1g + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dzT8BjLHqG perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-13414 namespace + cat /tmp/tmp.O4WP8tah1g + rm /tmp/tmp.dzT8BjLHqG /tmp/tmp.O4WP8tah1g + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.xz398N8464 ++ mktemp + local LAST_ERR=/tmp/tmp.dx5eIwogx4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xz398N8464 deployment.apps "psmdb-client" deleted from version-service-13414 namespace + cat /tmp/tmp.dx5eIwogx4 + rm /tmp/tmp.xz398N8464 /tmp/tmp.dx5eIwogx4 + return 0 + sleep 30 + kubectl_bin get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.pQWh3ufkru + local LAST_OUT=/tmp/tmp.ESyqH85bIz ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.V8p2Q1qLf1 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.P6nRRtJncH + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator + for i in $(seq 0 2) 
+ set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pQWh3ufkru + cat /tmp/tmp.V8p2Q1qLf1 + rm /tmp/tmp.pQWh3ufkru /tmp/tmp.V8p2Q1qLf1 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ESyqH85bIz deployment.apps/percona-server-mongodb-operator configured + cat /tmp/tmp.P6nRRtJncH + rm /tmp/tmp.ESyqH85bIz /tmp/tmp.P6nRRtJncH + return 0 + wait_deployment percona-server-mongodb-operator + local name=percona-server-mongodb-operator + sleep 10 + retry=0 + echo -n percona-server-mongodb-operator percona-server-mongodb-operator+ kubectl_bin get deployment percona-server-mongodb-operator -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.flZrnDS08p ++ mktemp + local LAST_ERR=/tmp/tmp.V1fVCCdxDb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get deployment percona-server-mongodb-operator -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.flZrnDS08p + cat /tmp/tmp.V1fVCCdxDb + rm /tmp/tmp.flZrnDS08p /tmp/tmp.V1fVCCdxDb + return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.IJL0f0AOBk +++ mktemp ++ local LAST_ERR=/tmp/tmp.pmdO4lqiiZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.replicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IJL0f0AOBk ++ cat /tmp/tmp.pmdO4lqiiZ ++ rm /tmp/tmp.IJL0f0AOBk /tmp/tmp.pmdO4lqiiZ ++ return 0 ++ kubectl_bin get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.nt9Unxhkmh +++ mktemp ++ local LAST_ERR=/tmp/tmp.x4ugsm6x2E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get deployment percona-server-mongodb-operator -o 'jsonpath={.status.readyReplicas}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nt9Unxhkmh ++ cat /tmp/tmp.x4ugsm6x2E ++ rm /tmp/tmp.nt9Unxhkmh /tmp/tmp.x4ugsm6x2E ++ return 0 + '[' 1 == 1 ']' + kubectl_bin delete pod -l run=version-service-cr -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.FXEgjzjl4Z ++ mktemp + local LAST_ERR=/tmp/tmp.EYwoVdRPLQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service-cr -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FXEgjzjl4Z pod "version-service-cr-7db489d9c4-w5l98" deleted from psmdb-operator namespace + cat /tmp/tmp.EYwoVdRPLQ + rm /tmp/tmp.FXEgjzjl4Z /tmp/tmp.EYwoVdRPLQ + return 0 + kubectl_bin delete pod -l run=version-service -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.e4s7o33k3e ++ mktemp + local LAST_ERR=/tmp/tmp.iVi37f3SUb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -l run=version-service -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.e4s7o33k3e pod "version-service-96477994-h78kz" deleted from psmdb-operator namespace + cat /tmp/tmp.iVi37f3SUb + rm /tmp/tmp.e4s7o33k3e /tmp/tmp.iVi37f3SUb + return 0 + check_telemetry_transfer http://version-service-cr:11000 disabled disabled + local cr_vs_uri=http://version-service-cr:11000 
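Disabling telemetry on the operator side is done by rewriting the live Deployment rather than re-rendering manifests: the current object is read back, the DISABLE_TELEMETRY env value is flipped with yq, and the result is re-applied. The command chain, essentially as it appears in the trace:

    kubectl get deployment/percona-server-mongodb-operator -o yaml -n psmdb-operator \
        | yq eval '(.spec.template.spec.containers[0].env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
        | kubectl apply -n psmdb-operator -f -

After the apply, the operator deployment is waited on again and both version-service pods are deleted so their logs start clean before the next telemetry assertion.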
+ local cr_vs_channel=disabled + local telemetry_state=disabled + cluster=minimal-cluster + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.umLTf6OpkL ++ mktemp + local LAST_ERR=/tmp/tmp.h8wy294PIg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.umLTf6OpkL deployment.apps/psmdb-client created + cat /tmp/tmp.h8wy294PIg + rm /tmp/tmp.umLTf6OpkL /tmp/tmp.h8wy294PIg + return 0 + yq eval '.metadata.name = "minimal-cluster"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OP72ESTXNl ++ mktemp + local LAST_ERR=/tmp/tmp.QnjTt2WMs2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OP72ESTXNl secret/minimal-cluster configured + cat /tmp/tmp.QnjTt2WMs2 Warning: resource secrets/minimal-cluster is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.OP72ESTXNl /tmp/tmp.QnjTt2WMs2 + return 0 + desc 'create PSMDB minimal cluster minimal-cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB minimal cluster minimal-cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "http://version-service-cr:11000" | .spec.upgradeOptions.apply = "disabled" | .spec.initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db" | .spec.crVersion = "9.9.9" | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.enabled = false | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cr-minimal.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.vNIgIsUU4z ++ mktemp + local LAST_ERR=/tmp/tmp.oF4IfZNjn1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vNIgIsUU4z perconaservermongodb.psmdb.percona.com/minimal-cluster created + cat /tmp/tmp.oF4IfZNjn1 + rm /tmp/tmp.vNIgIsUU4z /tmp/tmp.oF4IfZNjn1 + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running minimal-cluster-rs0 1 + local name=minimal-cluster-rs0 + let last_pod=0 + : + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o 
xtrace + local rs_name=rs0 + local cluster_name=minimal-cluster ++ seq 0 0 + for i in $(seq 0 $last_pod) + [[ 0 -eq 0 ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ATKAw6YHvE +++ mktemp ++ local LAST_ERR=/tmp/tmp.oaEWg1NHKb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ATKAw6YHvE ++ cat /tmp/tmp.oaEWg1NHKb ++ rm /tmp/tmp.ATKAw6YHvE /tmp/tmp.oaEWg1NHKb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod minimal-cluster-rs0-0 + local pod=minimal-cluster-rs0-0 + set +o xtrace waiting for pod/minimal-cluster-rs0-0 to be ready.............OK ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kwy1H56kk4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Hi0jFXQxq8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kwy1H56kk4 ++ cat /tmp/tmp.Hi0jFXQxq8 ++ rm /tmp/tmp.kwy1H56kk4 /tmp/tmp.Hi0jFXQxq8 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lYY8fAZbXd +++ mktemp ++ local LAST_ERR=/tmp/tmp.2hQDKgHTa0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb minimal-cluster -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lYY8fAZbXd ++ cat /tmp/tmp.2hQDKgHTa0 ++ rm /tmp/tmp.lYY8fAZbXd /tmp/tmp.2hQDKgHTa0 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness................ 
+ sleep 20 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zKr7gf74Ec +++ mktemp ++ local LAST_ERR=/tmp/tmp.zZv199zDWE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zKr7gf74Ec ++ cat /tmp/tmp.zZv199zDWE ++ rm /tmp/tmp.zKr7gf74Ec /tmp/tmp.zZv199zDWE ++ return 0 + local client_container=psmdb-client-bb8b97679-wdxjm + kubectl_bin exec psmdb-client-bb8b97679-wdxjm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.iDbnXpeA5M ++ mktemp + local LAST_ERR=/tmp/tmp.JRjIxMaPrI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-wdxjm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iDbnXpeA5M Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f5ace10d-0359-4779-b9e5-60968d422346") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.JRjIxMaPrI + rm /tmp/tmp.iDbnXpeA5M /tmp/tmp.JRjIxMaPrI + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@minimal-cluster-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@minimal-cluster-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@minimal-cluster-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin 
get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZhjTEFMZf0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mScZ9On41f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZhjTEFMZf0 ++ cat /tmp/tmp.mScZ9On41f ++ rm /tmp/tmp.ZhjTEFMZf0 /tmp/tmp.mScZ9On41f ++ return 0 + local client_container=psmdb-client-bb8b97679-wdxjm + kubectl_bin exec psmdb-client-bb8b97679-wdxjm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yjGFdhPeke ++ mktemp + local LAST_ERR=/tmp/tmp.bKTx40kVsG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-wdxjm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@minimal-cluster-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yjGFdhPeke Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://minimal-cluster-rs0-0.minimal-cluster-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("9fcce67f-c84a-4531-88e6-74c4ef280e38") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.bKTx40kVsG + rm /tmp/tmp.yjGFdhPeke /tmp/tmp.bKTx40kVsG + return 0 + desc 'check telemetry' + set +o xtrace ----------------------------------------------------------------------------------- check telemetry ----------------------------------------------------------------------------------- + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service-cr -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-cr-7db489d9c4-wgfhl -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.sIznWvRzSs ++ mktemp + local LAST_ERR=/tmp/tmp.ZJG14nRLBR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs version-service-cr-7db489d9c4-wgfhl -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sIznWvRzSs + cat /tmp/tmp.ZJG14nRLBR + rm /tmp/tmp.sIznWvRzSs /tmp/tmp.ZJG14nRLBR + return 0 + grep -E 'server request payload|unary call' + grep -Eo '\{.*\}' ++ kubectl get pods --selector=run=version-service -o 'jsonpath={.items[0].metadata.name}' -n psmdb-operator + jq 'del(."grpc.request.content".msg.customResourceUid)' + jq 'del(."grpc.request.content".msg.kubeVersion)' + jq 'del(."grpc.start_time")' + jq 'del(."grpc.time_ms")' + kubectl_bin logs version-service-96477994-9jdzn -n psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.S9XwMfDI8i ++ mktemp + local LAST_ERR=/tmp/tmp.8qWCzuKf5o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + 
kubectl logs version-service-96477994-9jdzn -n psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S9XwMfDI8i + cat /tmp/tmp.8qWCzuKf5o + rm /tmp/tmp.S9XwMfDI8i /tmp/tmp.8qWCzuKf5o + return 0 + local telemetry_log_file=disabled_telemetry.version-service.log-cw.json + desc 'telemetry was disabled in CR but in operator not' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR but in operator not ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == enabled ']' + local telemetry_cr_log_file=disabled_telemetry.version-service-cr.log-cw.json + local image_prefix=disabled + desc 'telemetry was disabled in operator but not in CR' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in operator but not in CR ----------------------------------------------------------------------------------- + '[' disabled == disabled-recommended -a disabled == disabled ']' + desc 'telemetry was disabled in CR as well as in operator' + set +o xtrace ----------------------------------------------------------------------------------- telemetry was disabled in CR as well as in operator ----------------------------------------------------------------------------------- + '[' disabled == disabled -a disabled == disabled ']' + [[ -s /tmp/tmp.mfX3wGCZe1/disabled_telemetry.version-service-cr.log.json ]] + [[ -s /tmp/tmp.mfX3wGCZe1/disabled_telemetry.version-service.log.json ]] ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.pIfOzcjXjn +++ mktemp ++ local LAST_ERR=/tmp/tmp.E0DmduDW9u ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pIfOzcjXjn ++ cat /tmp/tmp.E0DmduDW9u ++ rm /tmp/tmp.pIfOzcjXjn /tmp/tmp.E0DmduDW9u ++ return 0 + kubectl_bin delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-rjjfp ++ mktemp + local LAST_OUT=/tmp/tmp.uwsyfJrHtm ++ mktemp + local LAST_ERR=/tmp/tmp.bXvY7e1FK8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-rjjfp + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uwsyfJrHtm pod "percona-server-mongodb-operator-5bd97c4cbd-rjjfp" deleted from psmdb-operator namespace + cat /tmp/tmp.bXvY7e1FK8 + rm /tmp/tmp.uwsyfJrHtm /tmp/tmp.bXvY7e1FK8 + return 0 + kubectl_bin patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' ++ mktemp + local LAST_OUT=/tmp/tmp.a2Xt0kCfQZ ++ mktemp + local LAST_ERR=/tmp/tmp.Qf95IP2QEN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb minimal-cluster --type=merge -p '{"metadata":{"finalizers":["percona.com/delete-psmdb-pvc"]}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a2Xt0kCfQZ perconaservermongodb.psmdb.percona.com/minimal-cluster patched + cat /tmp/tmp.Qf95IP2QEN + rm /tmp/tmp.a2Xt0kCfQZ /tmp/tmp.Qf95IP2QEN + return 0 + kubectl_bin delete psmdb --all ++ 
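With upgradeOptions.apply=disabled and DISABLE_TELEMETRY=true, the expected outcome is that neither version-service pod logged any payload at all, so the assertion above is simply that the filtered capture files are empty. A minimal sketch of that check; the temp directory name is this run's, and treating a non-empty file as a failure is the presumed intent of the -s tests:

    for f in disabled_telemetry.version-service.log.json disabled_telemetry.version-service-cr.log.json; do
        if [[ -s "/tmp/tmp.mfX3wGCZe1/${f}" ]]; then
            echo "unexpected telemetry captured in ${f}" >&2
            exit 1
        fi
    done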
mktemp + local LAST_OUT=/tmp/tmp.FM8kfbmO5g ++ mktemp + local LAST_ERR=/tmp/tmp.bO88iaU1Kl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FM8kfbmO5g perconaservermongodb.psmdb.percona.com "minimal-cluster" deleted from version-service-13414 namespace + cat /tmp/tmp.bO88iaU1Kl + rm /tmp/tmp.FM8kfbmO5g /tmp/tmp.bO88iaU1Kl + return 0 + kubectl_bin delete deploy psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.MQFbeS8CXd ++ mktemp + local LAST_ERR=/tmp/tmp.kXaYl3tvIf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete deploy psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MQFbeS8CXd deployment.apps "psmdb-client" deleted from version-service-13414 namespace + cat /tmp/tmp.kXaYl3tvIf + rm /tmp/tmp.MQFbeS8CXd /tmp/tmp.kXaYl3tvIf + return 0 + sleep 30 + cases=("version-service-exact" "version-service-recommended" "version-service-latest" "version-service-major" "version-service-unreachable") + expected_images=("percona/percona-server-mongodb:6.0.3-2" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:8.0.4-1-multi" "percona/percona-server-mongodb:6.0.4-3" "$IMAGE_MONGOD") + for i in "${!cases[@]}" + desc 'test version-service-exact' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-exact ----------------------------------------------------------------------------------- + cluster=version-service-exact + expected_image=percona/percona-server-mongodb:6.0.3-2 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.LdtP04LHZE ++ mktemp + local LAST_ERR=/tmp/tmp.iyawIgHaMF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LdtP04LHZE secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.iyawIgHaMF + rm /tmp/tmp.LdtP04LHZE /tmp/tmp.iyawIgHaMF + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.JZnuuxOxRb + sed s%#initImage%docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/version-service-exact-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.JZnuuxOxRb + kubectl_bin apply -f - ++ mktemp + 
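From here the suite walks a table of upgradeOptions.apply scenarios, pairing each case name with the image the version service must resolve for it; the two arrays above are indexed together, and the per-case CR template gets the init image substituted via sed before being applied. A trimmed sketch of the table pattern:

    cases=("version-service-exact" "version-service-recommended")
    expected_images=("percona/percona-server-mongodb:6.0.3-2" "percona/percona-server-mongodb:8.0.4-1-multi")
    for i in "${!cases[@]}"; do
        echo "case ${cases[$i]} must end up on image ${expected_images[$i]}"
    done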
local LAST_OUT=/tmp/tmp.Z7rVgTr3Vn ++ mktemp + local LAST_ERR=/tmp/tmp.j8sF6ke2cc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z7rVgTr3Vn perconaservermongodb.psmdb.percona.com/version-service-exact created + cat /tmp/tmp.j8sF6ke2cc + rm /tmp/tmp.Z7rVgTr3Vn /tmp/tmp.j8sF6ke2cc + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-exact-rs0 3 + local name=version-service-exact-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-exact ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-exact-rs0-0 + local pod=version-service-exact-rs0-0 + set +o xtrace waiting for pod/version-service-exact-rs0-0 to be ready....................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-exact-rs0-1 + local pod=version-service-exact-rs0-1 + set +o xtrace waiting for pod/version-service-exact-rs0-1 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f3Qnoaqkx3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0SYpCBhWX2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f3Qnoaqkx3 ++ cat /tmp/tmp.0SYpCBhWX2 ++ rm /tmp/tmp.f3Qnoaqkx3 /tmp/tmp.0SYpCBhWX2 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-exact-rs0-2 + local pod=version-service-exact-rs0-2 + set +o xtrace waiting for pod/version-service-exact-rs0-2 to be ready..................OK ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IdxDG0vEnb +++ mktemp ++ local LAST_ERR=/tmp/tmp.EUMElpoflI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IdxDG0vEnb ++ cat /tmp/tmp.EUMElpoflI ++ rm /tmp/tmp.IdxDG0vEnb /tmp/tmp.EUMElpoflI ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XvH4IRDizG +++ mktemp ++ local LAST_ERR=/tmp/tmp.W0fYmHO9jY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-exact -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XvH4IRDizG ++ cat /tmp/tmp.W0fYmHO9jY ++ rm /tmp/tmp.XvH4IRDizG /tmp/tmp.W0fYmHO9jY ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace 
----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.jFBm1iaWFZ ++ mktemp + local LAST_ERR=/tmp/tmp.psKleUQWbU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jFBm1iaWFZ + cat /tmp/tmp.psKleUQWbU + rm /tmp/tmp.jFBm1iaWFZ /tmp/tmp.psKleUQWbU + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + log 'compare_kubectl: statefulset/version-service-exact-rs0 OK' + set +o xtrace [2026-03-11T00:13:41+0000] compare_kubectl: statefulset/version-service-exact-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-exact-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-exact-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@version-service-exact-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.miMBkt7g1V +++ mktemp ++ local LAST_ERR=/tmp/tmp.q3QN03ZEo6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.miMBkt7g1V ++ cat /tmp/tmp.q3QN03ZEo6 ++ rm /tmp/tmp.miMBkt7g1V /tmp/tmp.q3QN03ZEo6 ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jbh9woRISt ++ mktemp + local LAST_ERR=/tmp/tmp.A4swjZl4nb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo 
mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.jbh9woRISt + cat /tmp/tmp.A4swjZl4nb error: Internal error occurred: unable to upgrade connection: container not found ("psmdb-client") + sleep 0 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.jbh9woRISt + cat /tmp/tmp.A4swjZl4nb error: Internal error occurred: unable to upgrade connection: container not found ("psmdb-client") + sleep 4 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-exact-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jbh9woRISt Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-0.version-service-exact-rs0.version-service-13414.svc.cluster.local:27017,version-service-exact-rs0-2.version-service-exact-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("fff685e2-5b19-4de6-9c17-f734289afe7d") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.A4swjZl4nb + rm /tmp/tmp.jbh9woRISt /tmp/tmp.A4swjZl4nb + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-exact-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-exact-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@version-service-exact-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hfNrRn7E4M +++ mktemp ++ local LAST_ERR=/tmp/tmp.WFJ0mUJFeh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hfNrRn7E4M ++ cat /tmp/tmp.WFJ0mUJFeh ++ rm /tmp/tmp.hfNrRn7E4M /tmp/tmp.WFJ0mUJFeh ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo 
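The exec above fails twice with "container not found" while the freshly scheduled psmdb-client pod is still starting, then succeeds on the third try; that is the retry wrapper visible throughout this log doing its job. A reconstruction of the pattern from the trace, not the suite's exact helper:

    run_with_retry() {
        local status=1
        local i
        for i in $(seq 0 2); do
            set +e
            "$@" >/tmp/last_out 2>/tmp/last_err
            status=$?
            set -e
            [[ ${status} -eq 0 ]] && break
            cat /tmp/last_err >&2
            sleep $((i * 4))    # 0s, then 4s, matching the sleeps in the trace
        done
        cat /tmp/last_out
        return ${status}
    }

Usage would be along the lines of run_with_retry kubectl exec "${client}" -- bash -c '...', so a pod that is still pulling its image gets a few seconds to come up before the step is declared failed.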
mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.rolQHvm6ro ++ mktemp + local LAST_ERR=/tmp/tmp.yFVrt2G3bI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-exact-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rolQHvm6ro Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-exact-rs0-0.version-service-exact-rs0.version-service-13414.svc.cluster.local:27017,version-service-exact-rs0-2.version-service-exact-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("91bf9af2-97bf-48bf-a9d3-8f37ea04d2d7") } Percona Server for MongoDB server version: v6.0.3-2 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.yFVrt2G3bI + rm /tmp/tmp.rolQHvm6ro /tmp/tmp.yFVrt2G3bI + return 0 + compare_kubectl statefulset/version-service-exact-rs0 + local resource=statefulset/version-service-exact-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-exact-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.DV77QIpn6x ++ mktemp + local LAST_ERR=/tmp/tmp.QlMchmfE4e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-exact-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DV77QIpn6x + cat /tmp/tmp.QlMchmfE4e + rm /tmp/tmp.DV77QIpn6x /tmp/tmp.QlMchmfE4e + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-exact-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-exact-rs0.yml + log 'compare_kubectl: statefulset/version-service-exact-rs0 OK' + set +o xtrace [2026-03-11T00:13:54+0000] compare_kubectl: statefulset/version-service-exact-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-exact-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.3-2 + '[' percona/percona-server-mongodb:6.0.3-2 '!=' percona/percona-server-mongodb:6.0.3-2 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.a0p0d7u1VL ++ mktemp + local LAST_ERR=/tmp/tmp.RfF1Mab2Va + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a0p0d7u1VL perconaservermongodb.psmdb.percona.com "version-service-exact" deleted from version-service-13414 namespace + cat /tmp/tmp.RfF1Mab2Va + rm /tmp/tmp.a0p0d7u1VL /tmp/tmp.RfF1Mab2Va + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 
'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.5Z8hI99gqg +++ mktemp ++ local LAST_ERR=/tmp/tmp.2QivbuGtYK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5Z8hI99gqg ++ cat /tmp/tmp.2QivbuGtYK ++ rm /tmp/tmp.5Z8hI99gqg /tmp/tmp.2QivbuGtYK ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-p8hf6 pod "percona-server-mongodb-operator-5bd97c4cbd-p8hf6" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-recommended' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-recommended ----------------------------------------------------------------------------------- + cluster=version-service-recommended + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.WP3MOEofxc ++ mktemp + local LAST_ERR=/tmp/tmp.8vqH8yHqAb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WP3MOEofxc secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.8vqH8yHqAb Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
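For context on the trace above: every kubectl invocation in this suite goes through a kubectl_bin wrapper that captures stdout and stderr into mktemp files and retries the command up to three times before giving up, which is why each call is surrounded by LAST_OUT/LAST_ERR bookkeeping. A minimal bash sketch of that retry pattern, reconstructed from the trace (the real helper in the e2e-tests functions file may differ in detail):

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    # try the same kubectl command up to three times, backing off between attempts
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))    # the trace shows "sleep 0" then "sleep 4" between failed attempts
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}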
+ rm /tmp/tmp.WP3MOEofxc /tmp/tmp.8vqH8yHqAb + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.QHO6hv1yrb + sed s%#initImage%docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/version-service-recommended-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.QHO6hv1yrb + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OzYI9VUXjo ++ mktemp + local LAST_ERR=/tmp/tmp.WEuKETOZ1R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OzYI9VUXjo perconaservermongodb.psmdb.percona.com/version-service-recommended created + cat /tmp/tmp.WEuKETOZ1R + rm /tmp/tmp.OzYI9VUXjo /tmp/tmp.WEuKETOZ1R + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-recommended-rs0 3 + local name=version-service-recommended-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-recommended ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-recommended-rs0-0 + local pod=version-service-recommended-rs0-0 + set +o xtrace waiting for pod/version-service-recommended-rs0-0 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-recommended-rs0-1 + local pod=version-service-recommended-rs0-1 + set +o xtrace waiting for pod/version-service-recommended-rs0-1 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yv6f8uR9Ya +++ mktemp ++ local LAST_ERR=/tmp/tmp.7pI0SahtT1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yv6f8uR9Ya ++ cat /tmp/tmp.7pI0SahtT1 ++ rm /tmp/tmp.yv6f8uR9Ya /tmp/tmp.7pI0SahtT1 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-recommended-rs0-2 + local pod=version-service-recommended-rs0-2 + set +o xtrace waiting for pod/version-service-recommended-rs0-2 to be ready............OK ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.qTMvvtYocO +++ mktemp ++ local LAST_ERR=/tmp/tmp.dvGbRfMxlP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qTMvvtYocO ++ cat /tmp/tmp.dvGbRfMxlP ++ rm /tmp/tmp.qTMvvtYocO /tmp/tmp.dvGbRfMxlP ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NpGtptTJSW +++ mktemp ++ local LAST_ERR=/tmp/tmp.eiUxNoorrz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-recommended -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NpGtptTJSW ++ cat /tmp/tmp.eiUxNoorrz ++ rm /tmp/tmp.NpGtptTJSW /tmp/tmp.eiUxNoorrz ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Ij0n3llBFM ++ mktemp + local LAST_ERR=/tmp/tmp.nFvK0mK7tQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ij0n3llBFM + cat /tmp/tmp.nFvK0mK7tQ + rm /tmp/tmp.Ij0n3llBFM /tmp/tmp.nFvK0mK7tQ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + log 'compare_kubectl: statefulset/version-service-recommended-rs0 OK' + set +o xtrace [2026-03-11T00:18:08+0000] compare_kubectl: statefulset/version-service-recommended-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.06ub0KH4GB +++ mktemp ++ local LAST_ERR=/tmp/tmp.ncRh7F38eW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.06ub0KH4GB ++ cat /tmp/tmp.ncRh7F38eW ++ rm /tmp/tmp.06ub0KH4GB 
/tmp/tmp.ncRh7F38eW ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.VFa3iWy5UI ++ mktemp + local LAST_ERR=/tmp/tmp.jqMKMo4IZp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-recommended-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VFa3iWy5UI Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-13414.svc.cluster.local:27017,version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-13414.svc.cluster.local:27017,version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("fa8ff922-3f64-46ad-8a1b-8ffcb169e2fe") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.jqMKMo4IZp + rm /tmp/tmp.VFa3iWy5UI /tmp/tmp.jqMKMo4IZp + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-recommended-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-recommended-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@version-service-recommended-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.peMzgdtoWz +++ mktemp ++ local LAST_ERR=/tmp/tmp.YA5UPIjsj7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.peMzgdtoWz ++ cat /tmp/tmp.YA5UPIjsj7 ++ rm /tmp/tmp.peMzgdtoWz /tmp/tmp.YA5UPIjsj7 ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.55Yi39oFC8 ++ mktemp + local LAST_ERR=/tmp/tmp.0uaLDHE4CC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i 
in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-recommended-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.55Yi39oFC8 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-recommended-rs0-1.version-service-recommended-rs0.version-service-13414.svc.cluster.local:27017,version-service-recommended-rs0-0.version-service-recommended-rs0.version-service-13414.svc.cluster.local:27017,version-service-recommended-rs0-2.version-service-recommended-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f392c53b-3f54-487a-a577-556c8973b4d6") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.0uaLDHE4CC + rm /tmp/tmp.55Yi39oFC8 /tmp/tmp.0uaLDHE4CC + return 0 + compare_kubectl statefulset/version-service-recommended-rs0 + local resource=statefulset/version-service-recommended-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-recommended-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. 
== "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ohxahePdkQ ++ mktemp + local LAST_ERR=/tmp/tmp.OjYAcoibYF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-recommended-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ohxahePdkQ + cat /tmp/tmp.OjYAcoibYF + rm /tmp/tmp.ohxahePdkQ /tmp/tmp.OjYAcoibYF + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-recommended-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-recommended-rs0.yml + log 'compare_kubectl: statefulset/version-service-recommended-rs0 OK' + set +o xtrace [2026-03-11T00:18:15+0000] compare_kubectl: statefulset/version-service-recommended-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-recommended-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.LmYAAqoLsK ++ mktemp + local LAST_ERR=/tmp/tmp.lNGgVOak6h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LmYAAqoLsK perconaservermongodb.psmdb.percona.com "version-service-recommended" deleted from version-service-13414 namespace + cat /tmp/tmp.lNGgVOak6h + rm /tmp/tmp.LmYAAqoLsK /tmp/tmp.lNGgVOak6h + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 
'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.4ggDqXpL7r +++ mktemp ++ local LAST_ERR=/tmp/tmp.oB9TkdByVV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4ggDqXpL7r ++ cat /tmp/tmp.oB9TkdByVV ++ rm /tmp/tmp.4ggDqXpL7r /tmp/tmp.oB9TkdByVV ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-b78wg pod "percona-server-mongodb-operator-5bd97c4cbd-b78wg" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-latest' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-latest ----------------------------------------------------------------------------------- + cluster=version-service-latest + expected_image=percona/percona-server-mongodb:8.0.4-1-multi + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.b5tV2nPyEl ++ mktemp + local LAST_ERR=/tmp/tmp.APfEhLtMey + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b5tV2nPyEl secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.APfEhLtMey Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
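For context on the compare_kubectl steps in this log: each one dumps the live StatefulSet, strips fields that vary between runs (UIDs, timestamps, images, namespace, operator hashes) with a long yq filter, removes a few fields that depend on the Kubernetes server version (the version_gt check via bc), and diffs the result against the expected YAML shipped with the test. A condensed sketch of that flow with the yq filter heavily abridged; the helper bodies and variables below (test_dir, tmp_dir, KUBE_VERSION) are reconstructed from the trace, not the exact test-suite code:

# compare a live object against e2e-tests/<test>/compare/<name>.yml
compare_kubectl() {
    local resource=$1                                   # e.g. statefulset/version-service-recommended-rs0
    local name=${resource//\//_}
    local expected="${test_dir}/compare/${name}.yml"    # test_dir/tmp_dir are assumed to be set by the suite
    local actual="${tmp_dir}/${name}.yml"

    # normalize: drop run-specific metadata, images, and the namespace before diffing
    kubectl get -o yaml "$resource" \
        | yq eval '
            del(.metadata.managedFields) |
            del(.metadata.resourceVersion) |
            del(.. | select(has("uid")).uid) |
            del(.. | select(has("image")).image) |
            del(.status) |
            (.. | select(tag == "!!str")) |= sub("'"$namespace"'", "NAME_SPACE")
          ' - >"$actual"

    # fields introduced by newer Kubernetes releases are removed before the comparison
    if version_gt 1.22; then
        yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' "$actual"
    fi

    diff -u "$expected" "$actual"
}

# true when the detected Kubernetes minor version is >= the argument
version_gt() {
    # KUBE_VERSION is a hypothetical variable standing in for the suite's detected server version (1.32 in this run)
    [ "$(echo "${KUBE_VERSION} >= $1" | bc -l)" -eq 1 ]
}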
+ rm /tmp/tmp.b5tV2nPyEl /tmp/tmp.APfEhLtMey + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.NFDhP5mgub + sed s%#initImage%docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/version-service-latest-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.NFDhP5mgub + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.fjQLU56AaZ ++ mktemp + local LAST_ERR=/tmp/tmp.IusjpTN45t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fjQLU56AaZ perconaservermongodb.psmdb.percona.com/version-service-latest created + cat /tmp/tmp.IusjpTN45t + rm /tmp/tmp.fjQLU56AaZ /tmp/tmp.IusjpTN45t + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-latest-rs0 3 + local name=version-service-latest-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-latest ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-latest-rs0-0 + local pod=version-service-latest-rs0-0 + set +o xtrace waiting for pod/version-service-latest-rs0-0 to be ready.................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-latest-rs0-1 + local pod=version-service-latest-rs0-1 + set +o xtrace waiting for pod/version-service-latest-rs0-1 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AlI6mkUC5A +++ mktemp ++ local LAST_ERR=/tmp/tmp.IymLq9CD4T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AlI6mkUC5A ++ cat /tmp/tmp.IymLq9CD4T ++ rm /tmp/tmp.AlI6mkUC5A /tmp/tmp.IymLq9CD4T ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-latest-rs0-2 + local pod=version-service-latest-rs0-2 + set +o xtrace waiting for pod/version-service-latest-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rmsYqUKcev +++ mktemp ++ local LAST_ERR=/tmp/tmp.yXW6tv5wTt ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rmsYqUKcev ++ cat /tmp/tmp.yXW6tv5wTt ++ rm /tmp/tmp.rmsYqUKcev /tmp/tmp.yXW6tv5wTt ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fvKboRZZXp +++ mktemp ++ local LAST_ERR=/tmp/tmp.7LVBSUzm7y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-latest -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fvKboRZZXp ++ cat /tmp/tmp.7LVBSUzm7y ++ rm /tmp/tmp.fvKboRZZXp /tmp/tmp.7LVBSUzm7y ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.RciJsAplie ++ mktemp + local LAST_ERR=/tmp/tmp.ocf5AAyfTe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RciJsAplie + cat /tmp/tmp.ocf5AAyfTe + rm /tmp/tmp.RciJsAplie /tmp/tmp.ocf5AAyfTe + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + log 'compare_kubectl: statefulset/version-service-latest-rs0 OK' + set +o xtrace [2026-03-11T00:20:47+0000] compare_kubectl: statefulset/version-service-latest-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-latest-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-latest-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@version-service-latest-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.28vIQpYgSb +++ mktemp ++ local LAST_ERR=/tmp/tmp.WFdOEs5gRB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.28vIQpYgSb ++ cat /tmp/tmp.WFdOEs5gRB ++ rm /tmp/tmp.28vIQpYgSb /tmp/tmp.WFdOEs5gRB ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf 
'\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tu7FEtuSxJ ++ mktemp + local LAST_ERR=/tmp/tmp.ujZTKvUYur + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-latest-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tu7FEtuSxJ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-2.version-service-latest-rs0.version-service-13414.svc.cluster.local:27017,version-service-latest-rs0-0.version-service-latest-rs0.version-service-13414.svc.cluster.local:27017,version-service-latest-rs0-1.version-service-latest-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("ca80b40a-3482-4b3f-9d95-a6992263db01") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.ujZTKvUYur + rm /tmp/tmp.tu7FEtuSxJ /tmp/tmp.ujZTKvUYur + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-latest-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-latest-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@version-service-latest-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.b9KLaDS0wk +++ mktemp ++ local LAST_ERR=/tmp/tmp.1RwO1639Ke ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.b9KLaDS0wk ++ cat /tmp/tmp.1RwO1639Ke ++ rm /tmp/tmp.b9KLaDS0wk /tmp/tmp.1RwO1639Ke ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jBUCuLPIF6 ++ mktemp + local LAST_ERR=/tmp/tmp.iUeBwWm9kS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo 
mongodb+srv://myApp:myPass@version-service-latest-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jBUCuLPIF6 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-latest-rs0-2.version-service-latest-rs0.version-service-13414.svc.cluster.local:27017,version-service-latest-rs0-0.version-service-latest-rs0.version-service-13414.svc.cluster.local:27017,version-service-latest-rs0-1.version-service-latest-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b5b78c1d-a2eb-4142-825a-ddacf478335a") } Percona Server for MongoDB server version: v8.0.4-1 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.iUeBwWm9kS + rm /tmp/tmp.jBUCuLPIF6 /tmp/tmp.iUeBwWm9kS + return 0 + compare_kubectl statefulset/version-service-latest-rs0 + local resource=statefulset/version-service-latest-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-latest-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.nEKFGwpzM3 ++ mktemp + local LAST_ERR=/tmp/tmp.3AnrswrcSn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-latest-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nEKFGwpzM3 + cat /tmp/tmp.3AnrswrcSn + rm /tmp/tmp.nEKFGwpzM3 /tmp/tmp.3AnrswrcSn + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-latest-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-latest-rs0.yml + log 'compare_kubectl: statefulset/version-service-latest-rs0 OK' + set +o xtrace [2026-03-11T00:20:53+0000] compare_kubectl: statefulset/version-service-latest-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-latest-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:8.0.4-1-multi + '[' percona/percona-server-mongodb:8.0.4-1-multi '!=' percona/percona-server-mongodb:8.0.4-1-multi ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.GwJ0ocd9cc ++ mktemp + local LAST_ERR=/tmp/tmp.lFwBbfu0gw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GwJ0ocd9cc perconaservermongodb.psmdb.percona.com "version-service-latest" deleted from version-service-13414 namespace + cat /tmp/tmp.lFwBbfu0gw + rm /tmp/tmp.GwJ0ocd9cc /tmp/tmp.lFwBbfu0gw + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.sr09Pa9xxK +++ mktemp ++ local LAST_ERR=/tmp/tmp.msGJndoNPo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ 
set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sr09Pa9xxK ++ cat /tmp/tmp.msGJndoNPo ++ rm /tmp/tmp.sr09Pa9xxK /tmp/tmp.msGJndoNPo ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-s546d pod "percona-server-mongodb-operator-5bd97c4cbd-s546d" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-major' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-major ----------------------------------------------------------------------------------- + cluster=version-service-major + expected_image=percona/percona-server-mongodb:6.0.4-3 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.4OrjGLb6cA ++ mktemp + local LAST_ERR=/tmp/tmp.OcPyfrPpYk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4OrjGLb6cA secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.OcPyfrPpYk Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
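The expected_image just set for the version-service-major case is what the final assertion of every case checks: once the cluster is ready and the test data is written, the first container image of each mongod pod must match the tag the version service resolved. A sketch of that check, taken almost verbatim from the trace (error handling simplified):

expected_image=percona/percona-server-mongodb:6.0.4-3   # value for the version-service-major case

# collect all mongod pods for the cluster and fail fast if none came up
pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name))
if [ "${#pods[@]}" -eq 0 ]; then
    echo "no mongod pods found"
    exit 1
fi

for pod in "${pods[@]}"; do
    img=$(kubectl get "$pod" -o 'jsonpath={.spec.containers[0].image}')
    if [ "$img" != "$expected_image" ]; then
        echo "$pod is running $img, expected $expected_image"
        exit 1
    fi
done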
+ rm /tmp/tmp.4OrjGLb6cA /tmp/tmp.OcPyfrPpYk + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.0LsUGm8MMe + sed s%#initImage%docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/version-service-major-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.0LsUGm8MMe + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.zVVqdhQvdI ++ mktemp + local LAST_ERR=/tmp/tmp.4WwCHV3nC1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zVVqdhQvdI perconaservermongodb.psmdb.percona.com/version-service-major created + cat /tmp/tmp.4WwCHV3nC1 + rm /tmp/tmp.zVVqdhQvdI /tmp/tmp.4WwCHV3nC1 + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-major-rs0 3 + local name=version-service-major-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-major ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-major-rs0-0 + local pod=version-service-major-rs0-0 + set +o xtrace waiting for pod/version-service-major-rs0-0 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-major-rs0-1 + local pod=version-service-major-rs0-1 + set +o xtrace waiting for pod/version-service-major-rs0-1 to be ready..............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CTUDirgkBz +++ mktemp ++ local LAST_ERR=/tmp/tmp.v70d9GMdI2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CTUDirgkBz ++ cat /tmp/tmp.v70d9GMdI2 ++ rm /tmp/tmp.CTUDirgkBz /tmp/tmp.v70d9GMdI2 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-major-rs0-2 + local pod=version-service-major-rs0-2 + set +o xtrace waiting for pod/version-service-major-rs0-2 to be ready....................OK ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MtKmg5EMT1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zc4MjFFF34 ++ local exit_status=0 ++ 
local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MtKmg5EMT1 ++ cat /tmp/tmp.Zc4MjFFF34 ++ rm /tmp/tmp.MtKmg5EMT1 /tmp/tmp.Zc4MjFFF34 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LVncrgfe0I +++ mktemp ++ local LAST_ERR=/tmp/tmp.85yD3rovs9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-major -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LVncrgfe0I ++ cat /tmp/tmp.85yD3rovs9 ++ rm /tmp/tmp.LVncrgfe0I /tmp/tmp.85yD3rovs9 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-major-rs0 + local resource=statefulset/version-service-major-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-major-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.CUrSLEJOWx ++ mktemp + local LAST_ERR=/tmp/tmp.4AT0UGtxJG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CUrSLEJOWx + cat /tmp/tmp.4AT0UGtxJG + rm /tmp/tmp.CUrSLEJOWx /tmp/tmp.4AT0UGtxJG + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + log 'compare_kubectl: statefulset/version-service-major-rs0 OK' + set +o xtrace [2026-03-11T00:23:40+0000] compare_kubectl: statefulset/version-service-major-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-major-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-major-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@version-service-major-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i4MQEUyMhG +++ mktemp ++ local LAST_ERR=/tmp/tmp.yRQ35TChVO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.i4MQEUyMhG ++ cat /tmp/tmp.yRQ35TChVO ++ rm /tmp/tmp.i4MQEUyMhG /tmp/tmp.yRQ35TChVO ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" 
}]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JDlTphLvpG ++ mktemp + local LAST_ERR=/tmp/tmp.xgCtKLznk4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-major-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JDlTphLvpG Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-2.version-service-major-rs0.version-service-13414.svc.cluster.local:27017,version-service-major-rs0-0.version-service-major-rs0.version-service-13414.svc.cluster.local:27017,version-service-major-rs0-1.version-service-major-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("833fb505-380a-408d-bb33-d975b4ed8072") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.xgCtKLznk4 + rm /tmp/tmp.JDlTphLvpG /tmp/tmp.xgCtKLznk4 + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-major-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-major-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@version-service-major-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qhtER2p2Yp +++ mktemp ++ local LAST_ERR=/tmp/tmp.WX4vmToO56 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qhtER2p2Yp ++ cat /tmp/tmp.WX4vmToO56 ++ rm /tmp/tmp.qhtER2p2Yp /tmp/tmp.WX4vmToO56 ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.g5RxgJo7MM ++ mktemp + local LAST_ERR=/tmp/tmp.k7akOnnz7s + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-major-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a 
-n 1 ']' + break + cat /tmp/tmp.g5RxgJo7MM Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-major-rs0-2.version-service-major-rs0.version-service-13414.svc.cluster.local:27017,version-service-major-rs0-0.version-service-major-rs0.version-service-13414.svc.cluster.local:27017,version-service-major-rs0-1.version-service-major-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b948cd1f-8807-46c0-9ff2-7414c6ca99db") } Percona Server for MongoDB server version: v6.0.4-3 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.k7akOnnz7s + rm /tmp/tmp.g5RxgJo7MM /tmp/tmp.k7akOnnz7s + return 0 + compare_kubectl statefulset/version-service-major-rs0 + local resource=statefulset/version-service-major-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-major-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.VRqnY4cbVF ++ mktemp + local LAST_ERR=/tmp/tmp.VQnylr00uW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-major-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VRqnY4cbVF + cat /tmp/tmp.VQnylr00uW + rm /tmp/tmp.VRqnY4cbVF /tmp/tmp.VQnylr00uW + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-major-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-major-rs0.yml + log 'compare_kubectl: statefulset/version-service-major-rs0 OK' + set +o xtrace [2026-03-11T00:23:46+0000] compare_kubectl: statefulset/version-service-major-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-major-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=percona/percona-server-mongodb:6.0.4-3 + '[' percona/percona-server-mongodb:6.0.4-3 '!=' percona/percona-server-mongodb:6.0.4-3 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.LNvcZR0Mg9 ++ mktemp + local LAST_ERR=/tmp/tmp.QQJnZOOgoC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LNvcZR0Mg9 perconaservermongodb.psmdb.percona.com "version-service-major" deleted from version-service-13414 namespace + cat /tmp/tmp.QQJnZOOgoC + rm /tmp/tmp.LNvcZR0Mg9 /tmp/tmp.QQJnZOOgoC + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.TPt4Rp7M49 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jsvOZ39il8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TPt4Rp7M49 ++ cat /tmp/tmp.jsvOZ39il8 ++ rm /tmp/tmp.TPt4Rp7M49 /tmp/tmp.jsvOZ39il8 ++ return 
0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-2nkhq pod "percona-server-mongodb-operator-5bd97c4cbd-2nkhq" deleted from psmdb-operator namespace + sleep 10 + for i in "${!cases[@]}" + desc 'test version-service-unreachable' + set +o xtrace ----------------------------------------------------------------------------------- test version-service-unreachable ----------------------------------------------------------------------------------- + cluster=version-service-unreachable + expected_image=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.WqkpaETEQX ++ mktemp + local LAST_ERR=/tmp/tmp.LhqYwbToqv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WqkpaETEQX secret/some-users configured deployment.apps/psmdb-client unchanged + cat /tmp/tmp.LhqYwbToqv Warning: resource secrets/some-users is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.WqkpaETEQX /tmp/tmp.LhqYwbToqv + return 0 + desc 'create PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster ----------------------------------------------------------------------------------- ++ mktemp + tmp_file=/tmp/tmp.0MMsxbJbey + sed s%#initImage%docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db%g /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/conf/version-service-unreachable-rs0.yml + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f - + yq eval ' .spec.backup.enabled = false | del(.spec.backup.tasks) | .spec.image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0" | .spec.pmm.image = "docker.io/percona/pmm-client:2.44.1-1" | .spec.backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' /tmp/tmp.0MMsxbJbey ++ mktemp + local LAST_OUT=/tmp/tmp.jue3Mv00Kz ++ mktemp + local LAST_ERR=/tmp/tmp.0pXwLf2Knf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jue3Mv00Kz perconaservermongodb.psmdb.percona.com/version-service-unreachable created + cat /tmp/tmp.0pXwLf2Knf + rm /tmp/tmp.jue3Mv00Kz /tmp/tmp.0pXwLf2Knf + return 0 + desc 'check if Pod is started' + set +o xtrace ----------------------------------------------------------------------------------- check if Pod is started ----------------------------------------------------------------------------------- + wait_for_running version-service-unreachable-rs0 3 + local 
name=version-service-unreachable-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=version-service-unreachable ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod version-service-unreachable-rs0-0 + local pod=version-service-unreachable-rs0-0 + set +o xtrace waiting for pod/version-service-unreachable-rs0-0 to be ready.................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod version-service-unreachable-rs0-1 + local pod=version-service-unreachable-rs0-1 + set +o xtrace waiting for pod/version-service-unreachable-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1NqJ45HO5j +++ mktemp ++ local LAST_ERR=/tmp/tmp.AeWP2TPruU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1NqJ45HO5j ++ cat /tmp/tmp.AeWP2TPruU ++ rm /tmp/tmp.1NqJ45HO5j /tmp/tmp.AeWP2TPruU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod version-service-unreachable-rs0-2 + local pod=version-service-unreachable-rs0-2 + set +o xtrace waiting for pod/version-service-unreachable-rs0-2 to be ready..................OK ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MaTDQjURsO +++ mktemp ++ local LAST_ERR=/tmp/tmp.vtNzspAEEE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MaTDQjURsO ++ cat /tmp/tmp.vtNzspAEEE ++ rm /tmp/tmp.MaTDQjURsO /tmp/tmp.vtNzspAEEE ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QWnPLqhA21 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LdQI1gouGQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb version-service-unreachable -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QWnPLqhA21 ++ cat /tmp/tmp.LdQI1gouGQ ++ rm /tmp/tmp.QWnPLqhA21 /tmp/tmp.LdQI1gouGQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + sleep 20 + desc 'check if statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local 
new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KoMdWrUZba ++ mktemp + local LAST_ERR=/tmp/tmp.fcLw1WP7Dl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KoMdWrUZba + cat /tmp/tmp.fcLw1WP7Dl + rm /tmp/tmp.KoMdWrUZba /tmp/tmp.fcLw1WP7Dl + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + log 'compare_kubectl: statefulset/version-service-unreachable-rs0 OK' + set +o xtrace [2026-03-11T00:26:27+0000] compare_kubectl: statefulset/version-service-unreachable-rs0 OK + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-13414 + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IVKotP63WO +++ mktemp ++ local LAST_ERR=/tmp/tmp.lgZoyZWsn8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IVKotP63WO ++ cat /tmp/tmp.lgZoyZWsn8 ++ rm /tmp/tmp.IVKotP63WO /tmp/tmp.lgZoyZWsn8 ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.jZOfSngRds ++ mktemp + local LAST_ERR=/tmp/tmp.g6FTbazw37 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | 
mongo mongodb+srv://userAdmin:userAdmin123456@version-service-unreachable-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jZOfSngRds Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-13414.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-13414.svc.cluster.local:27017,version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("b42e8918-2456-4b10-abdc-95069676363c") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.g6FTbazw37 + rm /tmp/tmp.jZOfSngRds /tmp/tmp.g6FTbazw37 + return 0 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@version-service-unreachable-rs0.version-service-13414 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@version-service-unreachable-rs0.version-service-13414 + local driver=mongodb+srv + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@version-service-unreachable-rs0.version-service-13414 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3Tzt6gNbbM +++ mktemp ++ local LAST_ERR=/tmp/tmp.14VDWPO8qp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3Tzt6gNbbM ++ cat /tmp/tmp.14VDWPO8qp ++ rm /tmp/tmp.3Tzt6gNbbM /tmp/tmp.14VDWPO8qp ++ return 0 + local client_container=psmdb-client-bb8b97679-dzcwm + kubectl_bin exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.IUFAaxhMIR ++ mktemp + local LAST_ERR=/tmp/tmp.igQWxjoTeW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-dzcwm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb+srv://myApp:myPass@version-service-unreachable-rs0.version-service-13414.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IUFAaxhMIR Percona Server for MongoDB shell version v4.4.29-28 connecting to: 
mongodb://version-service-unreachable-rs0-2.version-service-unreachable-rs0.version-service-13414.svc.cluster.local:27017,version-service-unreachable-rs0-1.version-service-unreachable-rs0.version-service-13414.svc.cluster.local:27017,version-service-unreachable-rs0-0.version-service-unreachable-rs0.version-service-13414.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("dc2c1ab3-6da6-472d-afb4-7f199b34a2b1") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.igQWxjoTeW + rm /tmp/tmp.IUFAaxhMIR /tmp/tmp.igQWxjoTeW + return 0 + compare_kubectl statefulset/version-service-unreachable-rs0 + local resource=statefulset/version-service-unreachable-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml + local new_result=/tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/version-service-unreachable-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("version-service-13414", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. 
| select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vD2fLiDAdR ++ mktemp + local LAST_ERR=/tmp/tmp.cNLRAyoKPr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/version-service-unreachable-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vD2fLiDAdR + cat /tmp/tmp.cNLRAyoKPr + rm /tmp/tmp.vD2fLiDAdR /tmp/tmp.cNLRAyoKPr + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/version-service/compare/statefulset_version-service-unreachable-rs0.yml /tmp/tmp.mfX3wGCZe1/statefulset_version-service-unreachable-rs0.yml + log 'compare_kubectl: statefulset/version-service-unreachable-rs0 OK' + set +o xtrace [2026-03-11T00:26:33+0000] compare_kubectl: statefulset/version-service-unreachable-rs0 OK + pods=($(kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name)) ++ kubectl get pods -l app.kubernetes.io/name=percona-server-mongodb -o=name + '[' 3 -eq 0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-0 -o 'jsonpath={.spec.containers[0].image}' + img=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 + '[' docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 '!=' docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-1 -o 'jsonpath={.spec.containers[0].image}' + img=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 + '[' docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 '!=' docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 ']' + for pod in "${pods[@]}" ++ kubectl get pod/version-service-unreachable-rs0-2 -o 'jsonpath={.spec.containers[0].image}' + img=docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 + '[' docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 '!=' docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0 ']' + kubectl_bin delete psmdb --all ++ mktemp + local LAST_OUT=/tmp/tmp.SGHK2jNDrN ++ mktemp + local LAST_ERR=/tmp/tmp.OyvUT6UsiB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete psmdb --all + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SGHK2jNDrN perconaservermongodb.psmdb.percona.com "version-service-unreachable" deleted from version-service-13414 namespace + cat /tmp/tmp.OyvUT6UsiB + rm /tmp/tmp.SGHK2jNDrN /tmp/tmp.OyvUT6UsiB + return 0 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.nKZRBokfAd +++ mktemp ++ local LAST_ERR=/tmp/tmp.0iM7oKcg3i ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ 
set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nKZRBokfAd ++ cat /tmp/tmp.0iM7oKcg3i ++ rm /tmp/tmp.nKZRBokfAd /tmp/tmp.0iM7oKcg3i ++ return 0 + kubectl delete pod -n psmdb-operator percona-server-mongodb-operator-5bd97c4cbd-hdbzc pod "percona-server-mongodb-operator-5bd97c4cbd-hdbzc" deleted from psmdb-operator namespace + sleep 10 + destroy version-service-13414 + local namespace=version-service-13414 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.RfK1Kz44Sj +++ mktemp ++ local LAST_ERR=/tmp/tmp.r2MmzYTzW4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RfK1Kz44Sj ++ cat /tmp/tmp.r2MmzYTzW4 No resources found in version-service-13414 namespace. ++ rm /tmp/tmp.RfK1Kz44Sj /tmp/tmp.r2MmzYTzW4 ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.RJapwRMS7S ++ mktemp + local LAST_ERR=/tmp/tmp.9RBcagLEbS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RJapwRMS7S customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.9RBcagLEbS + rm /tmp/tmp.RJapwRMS7S /tmp/tmp.9RBcagLEbS + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p 
'{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Fc5wN6BQaF ++ mktemp + local LAST_ERR=/tmp/tmp.YMFxI7REGe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fc5wN6BQaF + cat /tmp/tmp.YMFxI7REGe + rm /tmp/tmp.Fc5wN6BQaF /tmp/tmp.YMFxI7REGe + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.eDeKNiIaBY ++ mktemp + local LAST_ERR=/tmp/tmp.s3CoKizyS8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eDeKNiIaBY + cat /tmp/tmp.s3CoKizyS8 + rm /tmp/tmp.eDeKNiIaBY /tmp/tmp.s3CoKizyS8 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.QgBq3Q52hN ++ mktemp + local LAST_ERR=/tmp/tmp.qhkTSjKs49 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QgBq3Q52hN + cat /tmp/tmp.qhkTSjKs49 + rm /tmp/tmp.QgBq3Q52hN /tmp/tmp.qhkTSjKs49 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.RmmB5FEEfI ++ mktemp + local LAST_ERR=/tmp/tmp.3raTmWIWng + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RmmB5FEEfI clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io 
"service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.3raTmWIWng + rm /tmp/tmp.RmmB5FEEfI /tmp/tmp.3raTmWIWng + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.xou4RRTfSf ++ mktemp + local LAST_ERR=/tmp/tmp.P4yRR2al8Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.xou4RRTfSf + cat /tmp/tmp.P4yRR2al8Q Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps 
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.xou4RRTfSf + cat /tmp/tmp.P4yRR2al8Q Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.xou4RRTfSf + cat /tmp/tmp.P4yRR2al8Q Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error 
+ sleep 8
+ cat /tmp/tmp.xou4RRTfSf
+ cat /tmp/tmp.P4yRR2al8Q
serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.xou4RRTfSf /tmp/tmp.P4yRR2al8Q + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace version-service-13414 + rm -rf /tmp/tmp.mfX3wGCZe1 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.ZwAXz1OOw0 + local LAST_OUT=/tmp/tmp.FZyIoEzayq ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.KjVqq4NRyh + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.PTugUGprpF + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace version-service-13414 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator
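Each of the three delete attempts above reports the same set of NotFound errors, because cert-manager was never installed in this cluster, and the wrapper still exhausts its retries before giving up. From the xtrace (mktemp-backed LAST_OUT/LAST_ERR files, seq 0 2, sleeps of 0, 4 and 8 seconds, return 1 followed by + true in the caller) the retry helper presumably looks roughly like the sketch below. Only the name kubectl_bin comes from the trace; the body and the backoff formula are assumptions, not the suite's actual code.

  # Sketch only: a kubectl retry wrapper reconstructed from the xtrace above.
  # The real helper in the e2e suite may differ; the backoff formula
  # (sleep = timeout * attempt, giving 0, 4, 8 seconds) is an assumption.
  kubectl_bin() {
      local LAST_OUT LAST_ERR
      local exit_status=0
      local timeout=4
      LAST_OUT=$(mktemp)
      LAST_ERR=$(mktemp)
      for i in $(seq 0 2); do
          set +e
          kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
          exit_status=$?
          set -e
          if [ "$exit_status" -eq 0 ]; then
              break
          fi
          # On failure, surface the captured output before retrying.
          cat "$LAST_OUT"
          cat "$LAST_ERR"
          sleep $((timeout * i))   # observed in the log: sleep 0, sleep 4, sleep 8
      done
      cat "$LAST_OUT"
      cat "$LAST_ERR"
      rm "$LAST_OUT" "$LAST_ERR"
      return $exit_status
  }

The + true printed right after return 1 shows the caller tolerates the failure (roughly kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml || true), which is why the run can still report "test passed" even though every cert-manager deletion returned NotFound.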
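The final trace lines interleave two kubectl_bin invocations (two LAST_OUT temp files and two seq 0 2 expansions back to back), which suggests the test namespace and the operator namespace are force-deleted in parallel during teardown. A minimal sketch of that cleanup step, under the assumption that the calls are backgrounded (the parallelism is inferred from the interleaving, not confirmed) and with a hypothetical function name:

  # Sketch only: teardown inferred from the trace; the function name and the
  # backgrounding are assumptions.
  destroy_namespaces() {
      local test_ns=$1        # e.g. version-service-13414
      local operator_ns=$2    # e.g. psmdb-operator

      # --force=true with --grace-period=0 skips graceful pod termination, so a
      # stuck finalizer or terminating pod cannot hang the CI cleanup.
      kubectl_bin delete --grace-period=0 --force=true namespace "$test_ns" &
      kubectl_bin delete --grace-period=0 --force=true namespace "$operator_ns" &
      wait
  }

Forced deletion does not wait for confirmation that the namespaces' resources have actually terminated, which is acceptable here because the cluster is disposable CI infrastructure.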