Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/logs/service-per-pod.log Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.36) and server (1.32) exceeds the supported minor version skew of +/-1 + main + create_infra service-per-pod-110 + local ns=service-per-pod-110 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.zQ6CbMjRY6 ++ mktemp + local LAST_ERR=/tmp/tmp.V9rYeLzXZD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zQ6CbMjRY6 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.V9rYeLzXZD + rm /tmp/tmp.zQ6CbMjRY6 /tmp/tmp.V9rYeLzXZD + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.xNf0l9S42n ++ mktemp + local LAST_ERR=/tmp/tmp.QkJLr1F12A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xNf0l9S42n + cat /tmp/tmp.QkJLr1F12A + rm /tmp/tmp.xNf0l9S42n /tmp/tmp.QkJLr1F12A + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server 
doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dofgngal94 ++ mktemp + local LAST_ERR=/tmp/tmp.2tqGqQF4bn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dofgngal94 + cat /tmp/tmp.2tqGqQF4bn + rm /tmp/tmp.dofgngal94 /tmp/tmp.2tqGqQF4bn + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.4MpFUabj8j ++ mktemp + local LAST_ERR=/tmp/tmp.r1mT0RnHr2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4MpFUabj8j + cat /tmp/tmp.r1mT0RnHr2 + rm /tmp/tmp.4MpFUabj8j /tmp/tmp.r1mT0RnHr2 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.ofIYhuWSbk ++ mktemp + local LAST_ERR=/tmp/tmp.afy3QEscld + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ofIYhuWSbk clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.afy3QEscld + rm /tmp/tmp.ofIYhuWSbk /tmp/tmp.afy3QEscld + return 0 + check_crd_for_deletion PR-2274-acb3b334 + local git_tag=PR-2274-acb3b334 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2274-acb3b334/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/sbin/sed s/---//g + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4EQJ2TRTVa +++ mktemp ++ local LAST_ERR=/tmp/tmp.d3qsm3YNqF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.4EQJ2TRTVa ++ cat /tmp/tmp.d3qsm3YNqF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl 
get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.4EQJ2TRTVa ++ cat /tmp/tmp.d3qsm3YNqF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.4EQJ2TRTVa ++ cat /tmp/tmp.d3qsm3YNqF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.4EQJ2TRTVa ++ cat /tmp/tmp.d3qsm3YNqF Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.4EQJ2TRTVa /tmp/tmp.d3qsm3YNqF ++ return 1 + [[ '' == Terminating ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + local LAST_OUT=/tmp/tmp.gwjyzyXdE3 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.9OrEYY9eaR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + local 
LAST_OUT=/tmp/tmp.Eg9ka1Vu4X ++ mktemp + local LAST_ERR=/tmp/tmp.TlEc53epe8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gwjyzyXdE3 + cat /tmp/tmp.9OrEYY9eaR + rm /tmp/tmp.gwjyzyXdE3 /tmp/tmp.9OrEYY9eaR + return 0 namespace "cert-manager" deleted namespace "service-per-pod-16025" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Eg9ka1Vu4X namespace "psmdb-operator" deleted + cat /tmp/tmp.TlEc53epe8 + rm /tmp/tmp.Eg9ka1Vu4X /tmp/tmp.TlEc53epe8 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.3ifRzZNu19 ++ mktemp + local LAST_ERR=/tmp/tmp.PtOuYIbZSl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3ifRzZNu19 + cat /tmp/tmp.PtOuYIbZSl + rm /tmp/tmp.3ifRzZNu19 /tmp/tmp.PtOuYIbZSl + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.xdfiJTq6xF ++ mktemp + local LAST_ERR=/tmp/tmp.u4xDnKIYae + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xdfiJTq6xF namespace/psmdb-operator created + cat /tmp/tmp.u4xDnKIYae + rm /tmp/tmp.xdfiJTq6xF /tmp/tmp.u4xDnKIYae + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Lejvm68G4a +++ mktemp ++ local LAST_ERR=/tmp/tmp.gUkzZe68ge ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Lejvm68G4a ++ cat /tmp/tmp.gUkzZe68ge ++ rm /tmp/tmp.Lejvm68G4a /tmp/tmp.gUkzZe68ge ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster13 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Z1Dk4Go7lW ++ mktemp + local LAST_ERR=/tmp/tmp.gHk1mrueJT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster13 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z1Dk4Go7lW Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster13" modified. 
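Nearly every kubectl call in this trace goes through the harness's kubectl_bin wrapper: stdout and stderr are captured to mktemp files, the command is retried up to three times with an escalating sleep (0s, 4s, 8s, visible in the crd/null retries above), and the captured output is replayed before the temp files are removed. A minimal sketch of that wrapper, reconstructed from the trace; the real helper in the repo's e2e-tests scripts is more elaborate, and its retry predicate is reduced here to a plain exit-status check:

    # Sketch of the kubectl_bin retry wrapper seen throughout this log (assumption:
    # reconstructed from the trace, not the helper's actual source).
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break
            sleep $((timeout * i))   # matches the sleep 0 / sleep 4 / sleep 8 backoff in the trace
        done
        cat "$LAST_OUT"              # replay captured stdout, then stderr
        cat "$LAST_ERR" >&2
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

The delete_crd phase above also records a harmless quirk: once a CRD type is gone, kubectl get prints only an error and emits no rows, but GNU xargs without -r still runs its command once with no arguments, so $0 in the sh -xc template falls back to the literal word sh. That is why the log shows kubectl patch ... -n sh before the failure is apparently swallowed by an || : guard (the bare + : lines).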
+ cat /tmp/tmp.gHk1mrueJT + rm /tmp/tmp.Z1Dk4Go7lW /tmp/tmp.gHk1mrueJT + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.dWxdYlFfJV ++ mktemp + local LAST_ERR=/tmp/tmp.IZ3H3pK4Uw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dWxdYlFfJV customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.IZ3H3pK4Uw + rm /tmp/tmp.dWxdYlFfJV /tmp/tmp.IZ3H3pK4Uw + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.8CdwB6ZkJg ++ mktemp + local LAST_ERR=/tmp/tmp.6BR79h5ios + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8CdwB6ZkJg clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.6BR79h5ios + rm /tmp/tmp.8CdwB6ZkJg /tmp/tmp.6BR79h5ios + return 0 + yq eval $'\n\t\t\t(.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334") |\n\t\t\t((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |\n\t\t\t((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.CqDWGAnk7o ++ mktemp + local LAST_ERR=/tmp/tmp.we9bDEZ0On + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CqDWGAnk7o deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.we9bDEZ0On + rm /tmp/tmp.CqDWGAnk7o /tmp/tmp.we9bDEZ0On + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.B559T7tx3u +++ mktemp ++ local LAST_ERR=/tmp/tmp.mTvtBjXsOH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.B559T7tx3u ++ cat /tmp/tmp.mTvtBjXsOH ++ rm /tmp/tmp.B559T7tx3u /tmp/tmp.mTvtBjXsOH ++ return 0 + wait_operator_pod percona-server-mongodb-operator-578bbff7d7-dvrbn + local pod=percona-server-mongodb-operator-578bbff7d7-dvrbn + set +o xtrace waiting for pod/percona-server-mongodb-operator-578bbff7d7-dvrbn to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.TXed4qPjK0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.AFBay7OZwE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TXed4qPjK0 ++ cat /tmp/tmp.AFBay7OZwE ++ rm /tmp/tmp.TXed4qPjK0 /tmp/tmp.AFBay7OZwE ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-578bbff7d7-dvrbn ++ mktemp + local LAST_OUT=/tmp/tmp.jwV9BM9I7L ++ mktemp + local LAST_ERR=/tmp/tmp.aBzchWYcW8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-578bbff7d7-dvrbn + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jwV9BM9I7L + cat /tmp/tmp.aBzchWYcW8 + rm /tmp/tmp.jwV9BM9I7L /tmp/tmp.aBzchWYcW8 + return 0 2026-04-23T10:02:58.676Z INFO setup Manager starting up {"gitCommit": "acb3b334b50f3e35633eb97dbf38451e80253352", "gitBranch": "PR-2274-acb3b334", "buildTime": "", "goVersion": "go1.25.9", "os": "linux", "arch": "amd64"} + create_namespace service-per-pod-110 + local namespace=service-per-pod-110 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + awk '{print$1}' + xargs kubectl delete ns + '[' -n '' ']' + desc 'cleaned up old namespaces service-per-pod-110' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces service-per-pod-110 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace service-per-pod-110 --ignore-not-found + local LAST_OUT=/tmp/tmp.iS4agmwaRi ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.rw63odcExL + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.0r97FjTpQT + for i in $(seq 0 2) + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.MDdFycZMSj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace service-per-pod-110 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iS4agmwaRi + cat /tmp/tmp.rw63odcExL + rm /tmp/tmp.iS4agmwaRi /tmp/tmp.rw63odcExL + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0r97FjTpQT + cat /tmp/tmp.MDdFycZMSj + rm /tmp/tmp.0r97FjTpQT /tmp/tmp.MDdFycZMSj + return 0 + kubectl_bin wait --for=delete namespace service-per-pod-110 ++ mktemp + local LAST_OUT=/tmp/tmp.OfdJc8hY8a ++ mktemp + local LAST_ERR=/tmp/tmp.6NIH70wVnc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace service-per-pod-110 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OfdJc8hY8a + cat /tmp/tmp.6NIH70wVnc + rm /tmp/tmp.OfdJc8hY8a /tmp/tmp.6NIH70wVnc + return 0 + desc 'create namespace service-per-pod-110' + set +o xtrace ----------------------------------------------------------------------------------- create namespace service-per-pod-110 ----------------------------------------------------------------------------------- + kubectl_bin create namespace service-per-pod-110 ++ mktemp + local 
LAST_OUT=/tmp/tmp.o2qLoUitz3 ++ mktemp + local LAST_ERR=/tmp/tmp.mGuGQoZyXD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace service-per-pod-110 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o2qLoUitz3 namespace/service-per-pod-110 created + cat /tmp/tmp.mGuGQoZyXD + rm /tmp/tmp.o2qLoUitz3 /tmp/tmp.mGuGQoZyXD + return 0 + set_kube_ctx service-per-pod-110 + local namespace=service-per-pod-110 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.keW0bADGgi +++ mktemp ++ local LAST_ERR=/tmp/tmp.MGFWFWSVDi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.keW0bADGgi ++ cat /tmp/tmp.MGFWFWSVDi ++ rm /tmp/tmp.keW0bADGgi /tmp/tmp.MGFWFWSVDi ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster13 --namespace=service-per-pod-110 ++ mktemp + local LAST_OUT=/tmp/tmp.8Tn2VHtFv6 ++ mktemp + local LAST_ERR=/tmp/tmp.RDvweB7UAX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster13 --namespace=service-per-pod-110 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8Tn2VHtFv6 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2274-acb3b334-6-cluster13" modified. + cat /tmp/tmp.RDvweB7UAX + rm /tmp/tmp.8Tn2VHtFv6 /tmp/tmp.RDvweB7UAX + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.FRP1UvbrYb ++ mktemp + local LAST_ERR=/tmp/tmp.Jn9yUlO4wf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FRP1UvbrYb namespace/cert-manager created + cat /tmp/tmp.Jn9yUlO4wf + rm /tmp/tmp.FRP1UvbrYb /tmp/tmp.Jn9yUlO4wf + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.SbUiRQp8Cr ++ mktemp + local LAST_ERR=/tmp/tmp.KmGuCAv1vE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SbUiRQp8Cr namespace/cert-manager labeled + cat /tmp/tmp.KmGuCAv1vE + rm /tmp/tmp.SbUiRQp8Cr /tmp/tmp.KmGuCAv1vE + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.ZMbjDdgUjq ++ mktemp + local LAST_ERR=/tmp/tmp.SkMVCgTGVH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZMbjDdgUjq namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured 
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.SkMVCgTGVH Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.ZMbjDdgUjq /tmp/tmp.SkMVCgTGVH + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.jszGhcJduC ++ mktemp + local LAST_ERR=/tmp/tmp.MK4qf2g7L0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jszGhcJduC pod/cert-manager-559d798845-kvr5f condition met pod/cert-manager-cainjector-64958d9c7c-8k2nr condition met pod/cert-manager-webhook-7fb6f99b56-vq5r8 condition met + cat /tmp/tmp.MK4qf2g7L0 + rm /tmp/tmp.jszGhcJduC /tmp/tmp.MK4qf2g7L0 + return 0 + sleep 120 + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.rd1wRLVP5Z ++ mktemp + local LAST_ERR=/tmp/tmp.Sqs7X2m2Rt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rd1wRLVP5Z deployment.apps/psmdb-client created secret/some-users created + cat /tmp/tmp.Sqs7X2m2Rt + rm /tmp/tmp.rd1wRLVP5Z /tmp/tmp.Sqs7X2m2Rt + return 0 + desc 'check ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- check ClusterIP ----------------------------------------------------------------------------------- + check_cr_config cluster-ip-rs0 + local cluster=cluster-ip-rs0 + desc 'create PSMDB cluster cluster-ip-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster cluster-ip-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334"' + 
local LAST_OUT=/tmp/tmp.hKNaBW8msd + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-110/g + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.KEWdpZMjbK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hKNaBW8msd perconaservermongodb.psmdb.percona.com/cluster-ip created + cat /tmp/tmp.KEWdpZMjbK + rm /tmp/tmp.hKNaBW8msd /tmp/tmp.KEWdpZMjbK + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running cluster-ip-rs0 3 false + local name=cluster-ip-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=cluster-ip ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod cluster-ip-rs0-0 + local pod=cluster-ip-rs0-0 + set +o xtrace waiting for pod/cluster-ip-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod cluster-ip-rs0-1 + local pod=cluster-ip-rs0-1 + set +o xtrace waiting for pod/cluster-ip-rs0-1 to be ready....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HUyFZqlyQI +++ mktemp ++ local LAST_ERR=/tmp/tmp.BowkiZTp8K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HUyFZqlyQI ++ cat /tmp/tmp.BowkiZTp8K ++ rm /tmp/tmp.HUyFZqlyQI /tmp/tmp.BowkiZTp8K ++ return 0 + [[ '' == true ]] + wait_pod cluster-ip-rs0-2 + local pod=cluster-ip-rs0-2 + set +o xtrace waiting for pod/cluster-ip-rs0-2 to be ready......OK ++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uqsVXw3XDt +++ mktemp ++ local LAST_ERR=/tmp/tmp.TqxLym9XIS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uqsVXw3XDt ++ cat /tmp/tmp.TqxLym9XIS ++ rm /tmp/tmp.uqsVXw3XDt /tmp/tmp.TqxLym9XIS ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SJE4DOC66u +++ mktemp ++ local LAST_ERR=/tmp/tmp.OG5UIK6biy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SJE4DOC66u ++ cat /tmp/tmp.OG5UIK6biy ++ rm /tmp/tmp.SJE4DOC66u /tmp/tmp.OG5UIK6biy ++ return 0 + [[ '' == true ]] + sleep 10 + [[ false == true ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace 
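wait_for_running above picks which pods to wait for by probing optional replica-set features (arbiter, nonvoting, hidden) with filtered jsonpath queries such as {.spec.replsets[?(@.name=="rs0")].arbiter.enabled}; all three come back empty here, so only the three regular data pods are awaited. The compare_kubectl steps that follow fetch a live object, normalize it with a long yq filter (dropping managed fields, UIDs, images, hashes, and rewriting the namespace to NAME_SPACE), and diff the result against a golden file; an empty diff produces the compare_kubectl: ... OK lines. A minimal sketch of that normalize-and-diff idea, assuming yq v4, with $tmp_dir standing in for the harness's mktemp directory (the filter in the trace strips many more fields):

    # Normalize the live object, then diff against the checked-in golden file.
    kubectl get -o yaml statefulset/cluster-ip-rs0 \
        | yq eval '
            del(.metadata.managedFields) |
            del(.metadata.resourceVersion) |
            del(.. | select(has("uid")).uid) |
            del(.. | select(has("creationTimestamp")).creationTimestamp) |
            del(.. | select(has("image")).image) |
            del(.status) |
            (.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE")
          ' - >"$tmp_dir/statefulset_cluster-ip-rs0.yml"
    diff -u e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml \
        "$tmp_dir/statefulset_cluster-ip-rs0.yml"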
----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/cluster-ip-rs0 + local resource=statefulset/cluster-ip-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml + local new_result=/tmp/tmp.vRv2x1gZss/statefulset_cluster-ip-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/cluster-ip-rs0 ++ mktemp + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.lJa06ICTB8 ++ mktemp + local LAST_ERR=/tmp/tmp.9uf4c4ri9M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/cluster-ip-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lJa06ICTB8 + cat /tmp/tmp.9uf4c4ri9M + rm /tmp/tmp.lJa06ICTB8 /tmp/tmp.9uf4c4ri9M + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.vRv2x1gZss/statefulset_cluster-ip-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.vRv2x1gZss/statefulset_cluster-ip-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.vRv2x1gZss/statefulset_cluster-ip-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml /tmp/tmp.vRv2x1gZss/statefulset_cluster-ip-rs0.yml + log 'compare_kubectl: statefulset/cluster-ip-rs0 OK' + set +o xtrace [2026-04-23T10:07:27+0000] compare_kubectl: statefulset/cluster-ip-rs0 OK + compare_kubectl service/cluster-ip-rs0-0 + local resource=service/cluster-ip-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml + local new_result=/tmp/tmp.vRv2x1gZss/service_cluster-ip-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/cluster-ip-rs0-0 ++ mktemp + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.q4PR4w5LuK ++ mktemp + local LAST_ERR=/tmp/tmp.94H5k1zBGx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/cluster-ip-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q4PR4w5LuK + cat /tmp/tmp.94H5k1zBGx + rm /tmp/tmp.q4PR4w5LuK /tmp/tmp.94H5k1zBGx + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.vRv2x1gZss/service_cluster-ip-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.vRv2x1gZss/service_cluster-ip-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.vRv2x1gZss/service_cluster-ip-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml /tmp/tmp.vRv2x1gZss/service_cluster-ip-rs0-0.yml + log 'compare_kubectl: service/cluster-ip-rs0-0 OK' + set +o xtrace [2026-04-23T10:07:28+0000] compare_kubectl: service/cluster-ip-rs0-0 OK ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9tblDwyKXd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nRU95hI6g1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9tblDwyKXd +++ cat /tmp/tmp.nRU95hI6g1 +++ rm /tmp/tmp.9tblDwyKXd /tmp/tmp.nRU95hI6g1 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.o75gmrjD8c ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3lxckBl9zV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.o75gmrjD8c +++ cat /tmp/tmp.3lxckBl9zV +++ rm /tmp/tmp.o75gmrjD8c /tmp/tmp.3lxckBl9zV +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.uc57knPzs1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.1GyAzrCZD8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uc57knPzs1 ++ cat /tmp/tmp.1GyAzrCZD8 ++ rm /tmp/tmp.uc57knPzs1 /tmp/tmp.1GyAzrCZD8 ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rg42vOviu5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QhD21RrfHv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rg42vOviu5 +++ cat /tmp/tmp.QhD21RrfHv +++ rm /tmp/tmp.rg42vOviu5 /tmp/tmp.QhD21RrfHv +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HHDpV7QaP9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OPzG5rWbHe +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.HHDpV7QaP9 +++ cat /tmp/tmp.OPzG5rWbHe +++ rm /tmp/tmp.HHDpV7QaP9 /tmp/tmp.OPzG5rWbHe +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cJ4SPJnc9I +++ mktemp ++ local LAST_ERR=/tmp/tmp.2ADqlhs2AG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cJ4SPJnc9I ++ cat /tmp/tmp.2ADqlhs2AG ++ rm /tmp/tmp.cJ4SPJnc9I /tmp/tmp.2ADqlhs2AG ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4rOwH0OV5f ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZmVk5PJ2PK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4rOwH0OV5f +++ cat /tmp/tmp.ZmVk5PJ2PK +++ rm /tmp/tmp.4rOwH0OV5f /tmp/tmp.ZmVk5PJ2PK +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AcQDI5fQb7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4aiaO94lSm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.AcQDI5fQb7 +++ cat /tmp/tmp.4aiaO94lSm +++ rm /tmp/tmp.AcQDI5fQb7 /tmp/tmp.4aiaO94lSm +++ return 0 ++ 
service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.f1MWpkjMKU +++ mktemp ++ local LAST_ERR=/tmp/tmp.h9pSjlz7s3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.f1MWpkjMKU ++ cat /tmp/tmp.h9pSjlz7s3 ++ rm /tmp/tmp.f1MWpkjMKU /tmp/tmp.h9pSjlz7s3 ++ return 0 ++ return + local URI=34.118.239.179,34.118.232.6,34.118.231.90 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.239.179,34.118.232.6,34.118.231.90 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.239.179,34.118.232.6,34.118.231.90 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@34.118.239.179,34.118.232.6,34.118.231.90 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OLSh0qc6Co +++ mktemp ++ local LAST_ERR=/tmp/tmp.bfRBeGbbfn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OLSh0qc6Co ++ cat /tmp/tmp.bfRBeGbbfn ++ rm /tmp/tmp.OLSh0qc6Co /tmp/tmp.bfRBeGbbfn ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.239.179,34.118.232.6,34.118.231.90:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.QaRXwCyc81 ++ mktemp + local LAST_ERR=/tmp/tmp.hnJ63Nx7sk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.239.179,34.118.232.6,34.118.231.90:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QaRXwCyc81 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.239.179:27017,34.118.232.6:27017,34.118.231.90:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("74bca394-8b31-40d1-8d80-d101ffa42e5a") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.hnJ63Nx7sk + rm /tmp/tmp.QaRXwCyc81 /tmp/tmp.hnJ63Nx7sk + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- 
write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.118.239.179,34.118.232.6,34.118.231.90 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.239.179,34.118.232.6,34.118.231.90 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.239.179,34.118.232.6,34.118.231.90 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZOGMybb7tf +++ mktemp ++ local LAST_ERR=/tmp/tmp.mr38wCH440 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZOGMybb7tf ++ cat /tmp/tmp.mr38wCH440 ++ rm /tmp/tmp.ZOGMybb7tf /tmp/tmp.mr38wCH440 ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.239.179,34.118.232.6,34.118.231.90:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.byNrk0vmHa ++ mktemp + local LAST_ERR=/tmp/tmp.wt3wL6y0YD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.239.179,34.118.232.6,34.118.231.90:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.byNrk0vmHa Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.239.179:27017,34.118.232.6:27017,34.118.231.90:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("97342f66-1c7f-44f6-88ab-7ced40a4b7c5") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.wt3wL6y0YD + rm /tmp/tmp.byNrk0vmHa /tmp/tmp.wt3wL6y0YD + return 0 + sleep 30 ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.e2djMZqvx3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bJGik04XJ9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.e2djMZqvx3 +++ cat /tmp/tmp.bJGik04XJ9 +++ rm /tmp/tmp.e2djMZqvx3 /tmp/tmp.bJGik04XJ9 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5hQ4PQclv2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6fSsZSSJ7l +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.5hQ4PQclv2 +++ cat /tmp/tmp.6fSsZSSJ7l +++ rm /tmp/tmp.5hQ4PQclv2 /tmp/tmp.6fSsZSSJ7l +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T0vGhKdSzS +++ mktemp ++ local LAST_ERR=/tmp/tmp.UZqlMM6HGn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T0vGhKdSzS ++ cat /tmp/tmp.UZqlMM6HGn ++ rm /tmp/tmp.T0vGhKdSzS /tmp/tmp.UZqlMM6HGn ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.239.179 '' :27017 + local command=find + local uri=myApp:myPass@34.118.239.179 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:09:00+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.239.179 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.239.179 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.239.179 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rF0d3BSlQY +++ mktemp ++ local LAST_ERR=/tmp/tmp.YW5QPdU5Gb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rF0d3BSlQY ++ cat /tmp/tmp.YW5QPdU5Gb ++ rm /tmp/tmp.rF0d3BSlQY /tmp/tmp.YW5QPdU5Gb ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.239.179:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0fjKn8s74p ++ mktemp + local LAST_ERR=/tmp/tmp.an4WahOx15 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.239.179:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0fjKn8s74p + cat /tmp/tmp.an4WahOx15 + rm /tmp/tmp.0fjKn8s74p /tmp/tmp.an4WahOx15 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.u4SrGBttQl ++++ mktemp +++ local 
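-----------------------------------------------------------------------------------
note: the read check above (compare_mongo_cmd find ...) normalizes the shell
output before diffing it against a golden file, which is why the grep -E -v and
sed filters appear in the trace. The essential pipeline, with the grep pattern
abbreviated to a few of its alternatives and run_mongo being the suite's own
helper:

    run_mongo 'use myApp\n db.test.find()' "myApp:myPass@$ip" mongodb :27017 \
        | grep -E -v 'I NETWORK|W NETWORK|F NETWORK|Implicit session:|versions do not match' \
        | /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' \
        > /tmp/tmp.vRv2x1gZss/find
    diff -u "$src_dir/e2e-tests/service-per-pod/compare/find.json" /tmp/tmp.vRv2x1gZss/find

Stripping ObjectIds and the numeric namespace suffix keeps the golden file
stable across runs.
-----------------------------------------------------------------------------------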
LAST_ERR=/tmp/tmp.wLehoJ3tQp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.u4SrGBttQl +++ cat /tmp/tmp.wLehoJ3tQp +++ rm /tmp/tmp.u4SrGBttQl /tmp/tmp.wLehoJ3tQp +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0o2EEExNYw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CXwBWCvb5k +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0o2EEExNYw +++ cat /tmp/tmp.CXwBWCvb5k +++ rm /tmp/tmp.0o2EEExNYw /tmp/tmp.CXwBWCvb5k +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4U1zLTm0C9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DyRwjU4Fau ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4U1zLTm0C9 ++ cat /tmp/tmp.DyRwjU4Fau ++ rm /tmp/tmp.4U1zLTm0C9 /tmp/tmp.DyRwjU4Fau ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.232.6 '' :27017 + local command=find + local uri=myApp:myPass@34.118.232.6 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:09:07+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.232.6 mongodb :27017 '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.232.6 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.232.6 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3JobNqrC0C +++ mktemp ++ local LAST_ERR=/tmp/tmp.eR1AzeLdMf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3JobNqrC0C ++ cat /tmp/tmp.eR1AzeLdMf ++ rm /tmp/tmp.3JobNqrC0C /tmp/tmp.eR1AzeLdMf ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.6:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.B1JA5lXcr0 ++ 
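-----------------------------------------------------------------------------------
note: get_service_ip first confirms per-pod exposure is on
(.spec.replsets[].expose.enabled == true, hence the '[' true '!=' true ']'
checks above), then branches on the service type. For this cluster it simply
returns each service's internal IP; a condensed sketch of the ClusterIP branch
exercised here:

    get_service_ip() {
        local service=$1
        if kubectl get "service/$service" -o 'jsonpath={.spec.type}' 2>&1 \
                | grep -q NotFound; then
            return 1                              # service not created yet
        fi
        local service_type
        service_type=$(kubectl get "service/$service" -o 'jsonpath={.spec.type}')
        if [ "$service_type" = "ClusterIP" ]; then
            kubectl get "service/$service" -o 'jsonpath={.spec.clusterIP}'
            return
        fi
        # LoadBalancer/NodePort handling omitted; see the LoadBalancer phase below
    }
-----------------------------------------------------------------------------------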
mktemp + local LAST_ERR=/tmp/tmp.5vj2DVa7PF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.6:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B1JA5lXcr0 + cat /tmp/tmp.5vj2DVa7PF + rm /tmp/tmp.B1JA5lXcr0 /tmp/tmp.5vj2DVa7PF + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wc3UNCjGQq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xBZxL37QcJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wc3UNCjGQq +++ cat /tmp/tmp.xBZxL37QcJ +++ rm /tmp/tmp.wc3UNCjGQq /tmp/tmp.xBZxL37QcJ +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DNxcVK7ZD0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TPByPYsPFM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DNxcVK7ZD0 +++ cat /tmp/tmp.TPByPYsPFM +++ rm /tmp/tmp.DNxcVK7ZD0 /tmp/tmp.TPByPYsPFM +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O5mNFaSvYF +++ mktemp ++ local LAST_ERR=/tmp/tmp.bOa6bnuZ1v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.O5mNFaSvYF ++ cat /tmp/tmp.bOa6bnuZ1v ++ rm /tmp/tmp.O5mNFaSvYF /tmp/tmp.bOa6bnuZ1v ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.231.90 '' :27017 + local command=find + local uri=myApp:myPass@34.118.231.90 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:09:13+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.231.90 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@34.118.231.90 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.231.90 == *cfg* ]] + /usr/sbin/sed -re 
's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tCiXXl4yat +++ mktemp ++ local LAST_ERR=/tmp/tmp.cOPahO4Ees ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tCiXXl4yat ++ cat /tmp/tmp.cOPahO4Ees ++ rm /tmp/tmp.tCiXXl4yat /tmp/tmp.cOPahO4Ees ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.231.90:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.W4IxBxHA5X ++ mktemp + local LAST_ERR=/tmp/tmp.zhGHzQUDef + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.231.90:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W4IxBxHA5X + cat /tmp/tmp.zhGHzQUDef + rm /tmp/tmp.W4IxBxHA5X /tmp/tmp.zhGHzQUDef + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find + [[ cluster-ip-rs0 == node\-port\-rs0 ]] + desc 'delete PSMDB cluster cluster-ip-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster cluster-ip-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.edoOAE8UEp ++ mktemp + local LAST_ERR=/tmp/tmp.KGHRLfF8m2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.edoOAE8UEp perconaservermongodb.psmdb.percona.com "cluster-ip" deleted from service-per-pod-110 namespace + cat /tmp/tmp.KGHRLfF8m2 + rm /tmp/tmp.edoOAE8UEp /tmp/tmp.KGHRLfF8m2 + return 0 + desc 'check LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- check LoadBalancer ----------------------------------------------------------------------------------- + check_cr_config local-balancer-rs0 + local cluster=local-balancer-rs0 + desc 'create PSMDB cluster local-balancer-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(.image == null)).image = 
"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334"' + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + kubectl_bin apply -f - + /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-110/g + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.X5JgxdJogi ++ mktemp + local LAST_ERR=/tmp/tmp.XyLmbBjDHc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X5JgxdJogi perconaservermongodb.psmdb.percona.com/local-balancer created + cat /tmp/tmp.XyLmbBjDHc + rm /tmp/tmp.X5JgxdJogi /tmp/tmp.XyLmbBjDHc + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running local-balancer-rs0 3 false + local name=local-balancer-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=local-balancer ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod local-balancer-rs0-0 + local pod=local-balancer-rs0-0 + set +o xtrace waiting for pod/local-balancer-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod local-balancer-rs0-1 + local pod=local-balancer-rs0-1 + set +o xtrace waiting for pod/local-balancer-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7aXVPaBmsi +++ mktemp ++ local LAST_ERR=/tmp/tmp.DQSUJQne9E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7aXVPaBmsi ++ cat /tmp/tmp.DQSUJQne9E ++ rm /tmp/tmp.7aXVPaBmsi /tmp/tmp.DQSUJQne9E ++ return 0 + [[ '' == true ]] + wait_pod local-balancer-rs0-2 + local pod=local-balancer-rs0-2 + set +o xtrace waiting for pod/local-balancer-rs0-2 to be ready....OK ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qmsn8qrKc7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zdOahuy9Fi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qmsn8qrKc7 ++ cat /tmp/tmp.zdOahuy9Fi ++ rm /tmp/tmp.Qmsn8qrKc7 /tmp/tmp.zdOahuy9Fi ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G2Rx2GowjD +++ mktemp ++ local LAST_ERR=/tmp/tmp.SvSdrmUcGf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb local-balancer -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G2Rx2GowjD ++ cat /tmp/tmp.SvSdrmUcGf ++ rm /tmp/tmp.G2Rx2GowjD /tmp/tmp.SvSdrmUcGf ++ return 0 + [[ '' == true ]] + sleep 10 + [[ false == true ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/local-balancer-rs0 + local resource=statefulset/local-balancer-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml + local new_result=/tmp/tmp.vRv2x1gZss/statefulset_local-balancer-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/local-balancer-rs0 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. 
== "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.lzdZMO47HQ ++ mktemp + local LAST_ERR=/tmp/tmp.9q4dGACBvX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/local-balancer-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lzdZMO47HQ + cat /tmp/tmp.9q4dGACBvX + rm /tmp/tmp.lzdZMO47HQ /tmp/tmp.9q4dGACBvX + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.vRv2x1gZss/statefulset_local-balancer-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.vRv2x1gZss/statefulset_local-balancer-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.vRv2x1gZss/statefulset_local-balancer-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml /tmp/tmp.vRv2x1gZss/statefulset_local-balancer-rs0.yml + log 'compare_kubectl: statefulset/local-balancer-rs0 OK' + set +o xtrace [2026-04-23T10:10:43+0000] compare_kubectl: statefulset/local-balancer-rs0 OK + compare_kubectl service/local-balancer-rs0-0 + local resource=service/local-balancer-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml + local new_result=/tmp/tmp.vRv2x1gZss/service_local-balancer-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/local-balancer-rs0-0 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.AWX0R0WqF1 ++ mktemp + local LAST_ERR=/tmp/tmp.5TeOTHOfzT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/local-balancer-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AWX0R0WqF1 + cat /tmp/tmp.5TeOTHOfzT + rm /tmp/tmp.AWX0R0WqF1 /tmp/tmp.5TeOTHOfzT + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.vRv2x1gZss/service_local-balancer-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.vRv2x1gZss/service_local-balancer-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.vRv2x1gZss/service_local-balancer-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml /tmp/tmp.vRv2x1gZss/service_local-balancer-rs0-0.yml + log 'compare_kubectl: service/local-balancer-rs0-0 OK' + set +o xtrace [2026-04-23T10:10:44+0000] compare_kubectl: service/local-balancer-rs0-0 OK ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LV6HfXkQdL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3HItZITOeO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LV6HfXkQdL +++ cat /tmp/tmp.3HItZITOeO +++ rm /tmp/tmp.LV6HfXkQdL /tmp/tmp.3HItZITOeO +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ziRf5haqRi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YcedyOixVA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ziRf5haqRi +++ cat /tmp/tmp.YcedyOixVA +++ rm /tmp/tmp.ziRf5haqRi /tmp/tmp.YcedyOixVA 
+++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.quBpzdqBNu +++ mktemp ++ local LAST_ERR=/tmp/tmp.rKwYcCkHST ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.quBpzdqBNu ++ cat /tmp/tmp.rKwYcCkHST ++ rm /tmp/tmp.quBpzdqBNu /tmp/tmp.rKwYcCkHST ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.718RyHxD05 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xc9vBsuGXq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.718RyHxD05 ++ cat /tmp/tmp.Xc9vBsuGXq ++ rm /tmp/tmp.718RyHxD05 /tmp/tmp.Xc9vBsuGXq ++ return 0 ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lZK7FLeh70 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yyF4IiWEUt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lZK7FLeh70 +++ cat /tmp/tmp.yyF4IiWEUt +++ rm /tmp/tmp.lZK7FLeh70 /tmp/tmp.yyF4IiWEUt +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b5Y88axYuQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.upsPyqh7PD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.b5Y88axYuQ +++ cat /tmp/tmp.upsPyqh7PD +++ rm /tmp/tmp.b5Y88axYuQ /tmp/tmp.upsPyqh7PD +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MlwmOKGbSm +++ mktemp ++ local LAST_ERR=/tmp/tmp.I3DMq2m8Np ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MlwmOKGbSm ++ cat /tmp/tmp.I3DMq2m8Np ++ rm /tmp/tmp.MlwmOKGbSm /tmp/tmp.I3DMq2m8Np ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local 
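-----------------------------------------------------------------------------------
note: for the LoadBalancer cluster, get_service_ip falls through the ClusterIP
and NodePort tests and reads the provisioned ingress instead, querying the ip
field and then the hostname field (on this GCP-style run only the ip is set, so
the hostname query prints nothing). The branch, roughly:

    if [ "$service_type" = "LoadBalancer" ]; then
        kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
        kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    fi
-----------------------------------------------------------------------------------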
LAST_OUT=/tmp/tmp.rfFAO3jNUO +++ mktemp ++ local LAST_ERR=/tmp/tmp.r6DhUms2Ez ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rfFAO3jNUO ++ cat /tmp/tmp.r6DhUms2Ez ++ rm /tmp/tmp.rfFAO3jNUO /tmp/tmp.r6DhUms2Ez ++ return 0 ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UeCfiKD3cW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2slqK6ada1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UeCfiKD3cW +++ cat /tmp/tmp.2slqK6ada1 +++ rm /tmp/tmp.UeCfiKD3cW /tmp/tmp.2slqK6ada1 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Hv2JaVosGw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Y4VVRbqd6T +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Hv2JaVosGw +++ cat /tmp/tmp.Y4VVRbqd6T +++ rm /tmp/tmp.Hv2JaVosGw /tmp/tmp.Y4VVRbqd6T +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ 
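-----------------------------------------------------------------------------------
note: the long run of repeated get/grep/sleep triples above is a one-second
polling loop: local-balancer-rs0-2's load balancer had not been provisioned
yet, so the helper re-reads status.loadBalancer.ingress until the cloud
controller fills it in. Equivalent shape:

    until kubectl get service/local-balancer-rs0-2 \
            -o 'jsonpath={.status.loadBalancer.ingress[]}' \
            | grep -E -q 'hostname|ip'; do
        sleep 1
    done
-----------------------------------------------------------------------------------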
local LAST_OUT=/tmp/tmp.voClzEbGPt +++ mktemp ++ local LAST_ERR=/tmp/tmp.bN95kXpNJP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.voClzEbGPt ++ cat /tmp/tmp.bN95kXpNJP ++ rm /tmp/tmp.voClzEbGPt /tmp/tmp.bN95kXpNJP ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VoIr8QX1Eh +++ mktemp ++ local LAST_ERR=/tmp/tmp.BtZn7OW3s6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VoIr8QX1Eh ++ cat /tmp/tmp.BtZn7OW3s6 ++ rm /tmp/tmp.VoIr8QX1Eh /tmp/tmp.BtZn7OW3s6 ++ return 0 + local URI=34.28.242.232,35.184.150.29,35.192.154.178 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.28.242.232,35.184.150.29,35.192.154.178 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.28.242.232,35.184.150.29,35.192.154.178 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@34.28.242.232,35.184.150.29,35.192.154.178 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2OKP616Dgr +++ mktemp ++ local LAST_ERR=/tmp/tmp.6K4iDooHvw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2OKP616Dgr ++ cat /tmp/tmp.6K4iDooHvw ++ rm /tmp/tmp.2OKP616Dgr /tmp/tmp.6K4iDooHvw ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.28.242.232,35.184.150.29,35.192.154.178:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RBZUPTgK74 ++ mktemp + local LAST_ERR=/tmp/tmp.QyRcH1nvgE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.28.242.232,35.184.150.29,35.192.154.178:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RBZUPTgK74 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.28.242.232:27017,35.184.150.29:27017,35.192.154.178:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("c481b042-65b2-4084-b983-2e737d72ea76") 
} Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.QyRcH1nvgE + rm /tmp/tmp.RBZUPTgK74 /tmp/tmp.QyRcH1nvgE + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.28.242.232,35.184.150.29,35.192.154.178 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.28.242.232,35.184.150.29,35.192.154.178 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.28.242.232,35.184.150.29,35.192.154.178 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xeRvdAnuQo +++ mktemp ++ local LAST_ERR=/tmp/tmp.1qGrIHM1mG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xeRvdAnuQo ++ cat /tmp/tmp.1qGrIHM1mG ++ rm /tmp/tmp.xeRvdAnuQo /tmp/tmp.1qGrIHM1mG ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.28.242.232,35.184.150.29,35.192.154.178:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.yA16dEXkdu ++ mktemp + local LAST_ERR=/tmp/tmp.UYJSj7oM2q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.28.242.232,35.184.150.29,35.192.154.178:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yA16dEXkdu Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.28.242.232:27017,35.184.150.29:27017,35.192.154.178:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2d11a4bd-721c-403f-83ea-bf3d11361e50") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.UYJSj7oM2q + rm /tmp/tmp.yA16dEXkdu /tmp/tmp.UYJSj7oM2q + return 0 + sleep 30 ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FtobOIHO8p ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pQ82USbVPG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FtobOIHO8p +++ cat /tmp/tmp.pQ82USbVPG +++ rm /tmp/tmp.FtobOIHO8p /tmp/tmp.pQ82USbVPG +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get 
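-----------------------------------------------------------------------------------
note: in this trace run_mongo never talks to the cluster from the test runner
itself; it locates the psmdb-client pod and pipes the statement into the mongo
shell from inside the cluster network, as the kubectl exec lines above show.
Its core step, using the createUser call from this phase as the payload and one
of the three exposed addresses:

    client=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client" -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo "mongodb://userAdmin:userAdmin123456@34.28.242.232:27017/admin?ssl=false&replicaSet=rs0"'
-----------------------------------------------------------------------------------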
service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.61MK0sNllV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5UWqT3BuM2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.61MK0sNllV +++ cat /tmp/tmp.5UWqT3BuM2 +++ rm /tmp/tmp.61MK0sNllV /tmp/tmp.5UWqT3BuM2 +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2TKSZ3ToLA +++ mktemp ++ local LAST_ERR=/tmp/tmp.mVkfaW15ml ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2TKSZ3ToLA ++ cat /tmp/tmp.mVkfaW15ml ++ rm /tmp/tmp.2TKSZ3ToLA /tmp/tmp.mVkfaW15ml ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.839IIaIAfJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.5jEG9d4eGv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.839IIaIAfJ ++ cat /tmp/tmp.5jEG9d4eGv ++ rm /tmp/tmp.839IIaIAfJ /tmp/tmp.5jEG9d4eGv ++ return 0 + compare_mongo_cmd find myApp:myPass@34.28.242.232 '' :27017 + local command=find + local uri=myApp:myPass@34.28.242.232 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:12:46+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.28.242.232 mongodb :27017 '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.28.242.232 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.28.242.232 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FEILRGBOmE +++ mktemp ++ local LAST_ERR=/tmp/tmp.SiDgqGXRJB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FEILRGBOmE ++ cat /tmp/tmp.SiDgqGXRJB ++ rm /tmp/tmp.FEILRGBOmE 
/tmp/tmp.SiDgqGXRJB ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.28.242.232:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RTWa3lTKOn ++ mktemp + local LAST_ERR=/tmp/tmp.F8CRuHIJzf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.28.242.232:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RTWa3lTKOn + cat /tmp/tmp.F8CRuHIJzf + rm /tmp/tmp.RTWa3lTKOn /tmp/tmp.F8CRuHIJzf + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ElfFAku7Eb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QuLKgFcBya +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ElfFAku7Eb +++ cat /tmp/tmp.QuLKgFcBya +++ rm /tmp/tmp.ElfFAku7Eb /tmp/tmp.QuLKgFcBya +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9FBdh9KI0g ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F6BAWxJNa1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9FBdh9KI0g +++ cat /tmp/tmp.F6BAWxJNa1 +++ rm /tmp/tmp.9FBdh9KI0g /tmp/tmp.F6BAWxJNa1 +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4L4rsLqiQt +++ mktemp ++ local LAST_ERR=/tmp/tmp.cr586tBcFC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4L4rsLqiQt ++ cat /tmp/tmp.cr586tBcFC ++ rm /tmp/tmp.4L4rsLqiQt /tmp/tmp.cr586tBcFC ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lZTM1p80Vr +++ mktemp ++ local LAST_ERR=/tmp/tmp.A0LKGFC4bJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lZTM1p80Vr ++ cat /tmp/tmp.A0LKGFC4bJ ++ rm 
/tmp/tmp.lZTM1p80Vr /tmp/tmp.A0LKGFC4bJ ++ return 0 + compare_mongo_cmd find myApp:myPass@35.184.150.29 '' :27017 + local command=find + local uri=myApp:myPass@35.184.150.29 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:12:54+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@35.184.150.29 mongodb :27017 '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@35.184.150.29 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@35.184.150.29 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CcLGB0aTTN +++ mktemp ++ local LAST_ERR=/tmp/tmp.QENxOKPhth ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CcLGB0aTTN ++ cat /tmp/tmp.QENxOKPhth ++ rm /tmp/tmp.CcLGB0aTTN /tmp/tmp.QENxOKPhth ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@35.184.150.29:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.x35MpiiT9g ++ mktemp + local LAST_ERR=/tmp/tmp.NwUrQFGXGn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@35.184.150.29:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.x35MpiiT9g + cat /tmp/tmp.NwUrQFGXGn + rm /tmp/tmp.x35MpiiT9g /tmp/tmp.NwUrQFGXGn + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eZlqWB4Tl7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dkmdv3lF3r +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eZlqWB4Tl7 +++ cat /tmp/tmp.dkmdv3lF3r +++ rm /tmp/tmp.eZlqWB4Tl7 /tmp/tmp.dkmdv3lF3r +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kSx20Iqasl ++++ mktemp +++ 
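-----------------------------------------------------------------------------------
note: the find-and-diff check is being repeated once per replica set member,
one exposed load-balancer address at a time, so the inserted row must be
readable through every per-pod service. The real script resolves each address
via get_service_ip before each call; collapsed into a loop here only for
compactness:

    for ip in 34.28.242.232 35.184.150.29 35.192.154.178; do
        compare_mongo_cmd find "myApp:myPass@$ip" '' :27017
    done
-----------------------------------------------------------------------------------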
local LAST_ERR=/tmp/tmp.MWbtxDjYy1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kSx20Iqasl +++ cat /tmp/tmp.MWbtxDjYy1 +++ rm /tmp/tmp.kSx20Iqasl /tmp/tmp.MWbtxDjYy1 +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wS7orj1W64 +++ mktemp ++ local LAST_ERR=/tmp/tmp.9SFmdy0PUn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wS7orj1W64 ++ cat /tmp/tmp.9SFmdy0PUn ++ rm /tmp/tmp.wS7orj1W64 /tmp/tmp.9SFmdy0PUn ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WblJpzTYgJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.kSWhhRKzvC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WblJpzTYgJ ++ cat /tmp/tmp.kSWhhRKzvC ++ rm /tmp/tmp.WblJpzTYgJ /tmp/tmp.kSWhhRKzvC ++ return 0 + compare_mongo_cmd find myApp:myPass@35.192.154.178 '' :27017 + local command=find + local uri=myApp:myPass@35.192.154.178 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:13:03+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@35.192.154.178 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@35.192.154.178 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@35.192.154.178 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6Z4PyAzVQC +++ mktemp ++ local LAST_ERR=/tmp/tmp.2japWkwf5j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6Z4PyAzVQC ++ cat /tmp/tmp.2japWkwf5j ++ rm /tmp/tmp.6Z4PyAzVQC /tmp/tmp.2japWkwf5j ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | 
mongo mongodb://myApp:myPass@35.192.154.178:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.fFglfdDfZR ++ mktemp + local LAST_ERR=/tmp/tmp.30sgfE9Abh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@35.192.154.178:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fFglfdDfZR + cat /tmp/tmp.30sgfE9Abh + rm /tmp/tmp.fFglfdDfZR /tmp/tmp.30sgfE9Abh + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find + [[ local-balancer-rs0 == node\-port\-rs0 ]] + desc 'delete PSMDB cluster local-balancer-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.uPm4tNHuJ3 ++ mktemp + local LAST_ERR=/tmp/tmp.6MnHb2DIGl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uPm4tNHuJ3 perconaservermongodb.psmdb.percona.com "local-balancer" deleted from service-per-pod-110 namespace + cat /tmp/tmp.6MnHb2DIGl + rm /tmp/tmp.uPm4tNHuJ3 /tmp/tmp.6MnHb2DIGl + return 0 + desc 'check NodePort' + set +o xtrace ----------------------------------------------------------------------------------- check NodePort ----------------------------------------------------------------------------------- + check_cr_config node-port-rs0 + local cluster=node-port-rs0 + desc 'create PSMDB cluster node-port-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/node-port-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/node-port-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/node-port-rs0.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + local LAST_OUT=/tmp/tmp.2RJdgyu4nG + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-110/g + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334"' ++ mktemp + local LAST_ERR=/tmp/tmp.ULPwdZj6rV + local exit_status=0 + local timeout=4 ++ seq 0 2 + yq eval '.spec.upgradeOptions.apply="Never"' + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.2RJdgyu4nG perconaservermongodb.psmdb.percona.com/node-port created + cat /tmp/tmp.ULPwdZj6rV + rm /tmp/tmp.2RJdgyu4nG /tmp/tmp.ULPwdZj6rV + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running node-port-rs0 3 false + local name=node-port-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=node-port ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod node-port-rs0-0 + local pod=node-port-rs0-0 + set +o xtrace waiting for pod/node-port-rs0-0 to be ready........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod node-port-rs0-1 + local pod=node-port-rs0-1 + set +o xtrace waiting for pod/node-port-rs0-1 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NjhgpjPzDb +++ mktemp ++ local LAST_ERR=/tmp/tmp.fmywpg02AS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NjhgpjPzDb ++ cat /tmp/tmp.fmywpg02AS ++ rm /tmp/tmp.NjhgpjPzDb /tmp/tmp.fmywpg02AS ++ return 0 + [[ '' == true ]] + wait_pod node-port-rs0-2 + local pod=node-port-rs0-2 + set +o xtrace waiting for pod/node-port-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SXerwTDvAT +++ mktemp ++ local LAST_ERR=/tmp/tmp.mYwrgLiYJG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SXerwTDvAT ++ cat /tmp/tmp.mYwrgLiYJG ++ rm /tmp/tmp.SXerwTDvAT /tmp/tmp.mYwrgLiYJG ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5u50AROeVw +++ mktemp ++ local LAST_ERR=/tmp/tmp.T5axNq5Diq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5u50AROeVw ++ cat /tmp/tmp.T5axNq5Diq ++ rm /tmp/tmp.5u50AROeVw /tmp/tmp.T5axNq5Diq ++ return 0 + [[ '' == true ]] + sleep 10 + [[ false == true ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/node-port-rs0 + local resource=statefulset/node-port-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml + local 
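
wait_pod prints one dot per poll until the pod reports ready; its body runs behind set +o xtrace, so only the dots and the final OK are visible in the log. One plausible shape, assuming a poll on the container readiness status (the real helper may time out or check differently):

wait_pod() {
    local pod=$1
    echo -n "waiting for pod/${pod} to be ready"
    # Assumed readiness probe: the first container's ready flag.
    until [ "$(kubectl get pod "$pod" \
        -o 'jsonpath={.status.containerStatuses[0].ready}' 2>/dev/null)" = "true" ]; do
        echo -n .
        sleep 1
    done
    echo .OK
}
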
new_result=/tmp/tmp.vRv2x1gZss/statefulset_node-port-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/node-port-rs0 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vlVms8SJig ++ mktemp + local LAST_ERR=/tmp/tmp.reGWJ02sTP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/node-port-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vlVms8SJig + cat /tmp/tmp.reGWJ02sTP + rm /tmp/tmp.vlVms8SJig /tmp/tmp.reGWJ02sTP + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.vRv2x1gZss/statefulset_node-port-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.vRv2x1gZss/statefulset_node-port-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.vRv2x1gZss/statefulset_node-port-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml /tmp/tmp.vRv2x1gZss/statefulset_node-port-rs0.yml + log 'compare_kubectl: statefulset/node-port-rs0 OK' + set +o xtrace [2026-04-23T10:14:35+0000] compare_kubectl: statefulset/node-port-rs0 OK + compare_kubectl service/node-port-rs0-0 + local resource=service/node-port-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml + local new_result=/tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.bdMFyzhQqy ++ mktemp + local LAST_ERR=/tmp/tmp.L1s1pjgwlY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/node-port-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bdMFyzhQqy + cat /tmp/tmp.L1s1pjgwlY + rm /tmp/tmp.bdMFyzhQqy /tmp/tmp.L1s1pjgwlY + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + log 'compare_kubectl: service/node-port-rs0-0 OK' + set +o xtrace [2026-04-23T10:14:36+0000] compare_kubectl: service/node-port-rs0-0 OK ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2pNSHEkytN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gWyp7GKIVc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2pNSHEkytN +++ cat /tmp/tmp.gWyp7GKIVc +++ rm /tmp/tmp.2pNSHEkytN /tmp/tmp.gWyp7GKIVc +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.W3AXWmBTi7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GVgbHazagE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.W3AXWmBTi7 +++ cat /tmp/tmp.GVgbHazagE +++ rm /tmp/tmp.W3AXWmBTi7 /tmp/tmp.GVgbHazagE +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q9izdIGav1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.29nPfTJL6d 
++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q9izdIGav1 ++ cat /tmp/tmp.29nPfTJL6d ++ rm /tmp/tmp.Q9izdIGav1 /tmp/tmp.29nPfTJL6d ++ return 0 ++ return ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lVMdk8Y4Fb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uZkibyNpTp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lVMdk8Y4Fb +++ cat /tmp/tmp.uZkibyNpTp +++ rm /tmp/tmp.lVMdk8Y4Fb /tmp/tmp.uZkibyNpTp +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NVXkM2BqD4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pULTq41Tlv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NVXkM2BqD4 +++ cat /tmp/tmp.pULTq41Tlv +++ rm /tmp/tmp.NVXkM2BqD4 /tmp/tmp.pULTq41Tlv +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wukKBwYmL3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aA08gzHDPJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wukKBwYmL3 ++ cat /tmp/tmp.aA08gzHDPJ ++ rm /tmp/tmp.wukKBwYmL3 /tmp/tmp.aA08gzHDPJ ++ return 0 ++ return ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VoxwQNzhsm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VU85Xf6YjN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.VoxwQNzhsm +++ cat /tmp/tmp.VU85Xf6YjN +++ rm /tmp/tmp.VoxwQNzhsm /tmp/tmp.VU85Xf6YjN +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oveawwlEE7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fytT2mMR8P +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oveawwlEE7 +++ cat /tmp/tmp.fytT2mMR8P +++ rm /tmp/tmp.oveawwlEE7 /tmp/tmp.fytT2mMR8P +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort 
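
get_service_ip branches on .spec.type: for the NodePort services in this block it returns the clusterIP (the tests then connect to the in-cluster VIP), while for the LoadBalancer services earlier it read .status.loadBalancer.ingress and returned the ip or hostname, whichever the cloud provider populated. A sketch of that branching; whether the LoadBalancer path retries until ingress appears is not visible in this log and is assumed, and the ClusterIP branch is omitted because it is never taken here:

get_service_ip() {
    local service=$1
    local type
    type=$(kubectl get "service/$service" -o 'jsonpath={.spec.type}')
    case "$type" in
        NodePort)
            kubectl get "service/$service" -o 'jsonpath={.spec.clusterIP}'
            ;;
        LoadBalancer)
            # Assumed wait: the trace only shows this check passing once.
            until kubectl get "service/$service" \
                -o 'jsonpath={.status.loadBalancer.ingress[]}' | grep -Eq 'hostname|ip'; do
                sleep 1
            done
            # One of the two is empty depending on the provider.
            kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
            kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
            ;;
    esac
}
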
= NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8ZaXMqmSek +++ mktemp ++ local LAST_ERR=/tmp/tmp.lw2BqnwQjI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8ZaXMqmSek ++ cat /tmp/tmp.lw2BqnwQjI ++ rm /tmp/tmp.8ZaXMqmSek /tmp/tmp.lw2BqnwQjI ++ return 0 ++ return + local URI=34.118.237.30,34.118.233.85,34.118.234.85 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.237.30,34.118.233.85,34.118.234.85 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.237.30,34.118.233.85,34.118.234.85 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@34.118.237.30,34.118.233.85,34.118.234.85 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fb6xGYWLIK +++ mktemp ++ local LAST_ERR=/tmp/tmp.CsKIJJeJUE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fb6xGYWLIK ++ cat /tmp/tmp.CsKIJJeJUE ++ rm /tmp/tmp.fb6xGYWLIK /tmp/tmp.CsKIJJeJUE ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.237.30,34.118.233.85,34.118.234.85:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.JiiSYOuiuv ++ mktemp + local LAST_ERR=/tmp/tmp.LQb5Vg9zdo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.237.30,34.118.233.85,34.118.234.85:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JiiSYOuiuv Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.237.30:27017,34.118.233.85:27017,34.118.234.85:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("6f30f79c-49ae-4c9a-800f-16d2ed7f5bc9") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.LQb5Vg9zdo + rm /tmp/tmp.JiiSYOuiuv /tmp/tmp.LQb5Vg9zdo + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all 
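
run_mongo never connects from the test runner itself: it locates the long-lived psmdb-client pod by label, then execs into it and pipes the statement into the mongo shell, building the URI with ssl=false and replicaSet=rs0 exactly as in the exec lines above. compare_mongo_cmd additionally filters shell noise with the long grep -E -v pattern and strips ObjectId values and per-run namespaces with sed before diffing against find.json. A reduced sketch of the exec path; the extra driver and mongo_flag parameters visible in the trace are dropped here:

run_mongo() {
    local command=$1 uri=$2 suffix=${3:-:27017}
    local client
    client=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    # printf turns the \n escapes in $command into real shell lines for mongo.
    kubectl exec "$client" -- bash -c \
        "printf '$command\n' | mongo mongodb://${uri}${suffix}/admin?ssl=false\&replicaSet=rs0"
}

# e.g. run_mongo 'use myApp\n db.test.insert({ x: 100500 })' "myApp:myPass@$URI"
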
----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.118.237.30,34.118.233.85,34.118.234.85 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.237.30,34.118.233.85,34.118.234.85 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.237.30,34.118.233.85,34.118.234.85 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lQ72hB33D6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aF3e2Szcwh ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lQ72hB33D6 ++ cat /tmp/tmp.aF3e2Szcwh ++ rm /tmp/tmp.lQ72hB33D6 /tmp/tmp.aF3e2Szcwh ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.237.30,34.118.233.85,34.118.234.85:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.4jh8NxlN4A ++ mktemp + local LAST_ERR=/tmp/tmp.XE9LpL2Kjv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.237.30,34.118.233.85,34.118.234.85:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4jh8NxlN4A Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.237.30:27017,34.118.233.85:27017,34.118.234.85:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("2722e380-a2d0-43cd-90dd-a9e22e76d175") } Percona Server for MongoDB server version: v8.0.20-8 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.XE9LpL2Kjv + rm /tmp/tmp.4jh8NxlN4A /tmp/tmp.XE9LpL2Kjv + return 0 + sleep 30 ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fbda31eef7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LZvQ6RcsjC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fbda31eef7 +++ cat /tmp/tmp.LZvQ6RcsjC +++ rm /tmp/tmp.fbda31eef7 /tmp/tmp.LZvQ6RcsjC +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Uz7XkUJNIJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cMJeB9ntza +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Uz7XkUJNIJ +++ cat 
/tmp/tmp.cMJeB9ntza +++ rm /tmp/tmp.Uz7XkUJNIJ /tmp/tmp.cMJeB9ntza +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j2qvA6cjFi +++ mktemp ++ local LAST_ERR=/tmp/tmp.JFGwEWXm53 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j2qvA6cjFi ++ cat /tmp/tmp.JFGwEWXm53 ++ rm /tmp/tmp.j2qvA6cjFi /tmp/tmp.JFGwEWXm53 ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.237.30 '' :27017 + local command=find + local uri=myApp:myPass@34.118.237.30 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:16:06+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.237.30 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.237.30 + local driver=mongodb + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local suffix=:27017 + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.237.30 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DCcEzB72Y8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YYkmvRjYcv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DCcEzB72Y8 ++ cat /tmp/tmp.YYkmvRjYcv ++ rm /tmp/tmp.DCcEzB72Y8 /tmp/tmp.YYkmvRjYcv ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.237.30:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.FC4Lt3kdNV ++ mktemp + local LAST_ERR=/tmp/tmp.Ph9oyoS9lb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.237.30:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FC4Lt3kdNV + cat /tmp/tmp.Ph9oyoS9lb + rm /tmp/tmp.FC4Lt3kdNV /tmp/tmp.Ph9oyoS9lb + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ccPFYZtPtw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ot3EFAJszE +++ local 
exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ccPFYZtPtw +++ cat /tmp/tmp.Ot3EFAJszE +++ rm /tmp/tmp.ccPFYZtPtw /tmp/tmp.Ot3EFAJszE +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fVdfsZXaOA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yTrmnz89KD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.fVdfsZXaOA +++ cat /tmp/tmp.yTrmnz89KD +++ rm /tmp/tmp.fVdfsZXaOA /tmp/tmp.yTrmnz89KD +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AF3StjqspU +++ mktemp ++ local LAST_ERR=/tmp/tmp.PR9GwvKdeN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AF3StjqspU ++ cat /tmp/tmp.PR9GwvKdeN ++ rm /tmp/tmp.AF3StjqspU /tmp/tmp.PR9GwvKdeN ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.233.85 '' :27017 + local command=find + local uri=myApp:myPass@34.118.233.85 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:16:11+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.233.85 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.233.85 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.233.85 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3Q4VWzlTQk +++ mktemp ++ local LAST_ERR=/tmp/tmp.49JHZeFlsX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3Q4VWzlTQk ++ cat /tmp/tmp.49JHZeFlsX ++ rm /tmp/tmp.3Q4VWzlTQk /tmp/tmp.49JHZeFlsX ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.233.85:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.UsZjDkZBKG ++ mktemp + local 
LAST_ERR=/tmp/tmp.kYL9dRqj8M + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.233.85:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UsZjDkZBKG + cat /tmp/tmp.kYL9dRqj8M + rm /tmp/tmp.UsZjDkZBKG /tmp/tmp.kYL9dRqj8M + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Cuaolz2rhH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2pQWj7ymql +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Cuaolz2rhH +++ cat /tmp/tmp.2pQWj7ymql +++ rm /tmp/tmp.Cuaolz2rhH /tmp/tmp.2pQWj7ymql +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hIIOw7MLeP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KF1vN1ec0j +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hIIOw7MLeP +++ cat /tmp/tmp.KF1vN1ec0j +++ rm /tmp/tmp.hIIOw7MLeP /tmp/tmp.KF1vN1ec0j +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jSDVuOtK4A +++ mktemp ++ local LAST_ERR=/tmp/tmp.lPaQS6KDUi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jSDVuOtK4A ++ cat /tmp/tmp.lPaQS6KDUi ++ rm /tmp/tmp.jSDVuOtK4A /tmp/tmp.lPaQS6KDUi ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.234.85 '' :27017 + local command=find + local uri=myApp:myPass@34.118.234.85 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-04-23T10:16:18+0000] running db.test.find() in myApp + [[ false == true ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.234.85 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.234.85 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.234.85 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 
's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ddb0WQ3Q93 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gZ4uDv7kRN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ddb0WQ3Q93 ++ cat /tmp/tmp.gZ4uDv7kRN ++ rm /tmp/tmp.ddb0WQ3Q93 /tmp/tmp.gZ4uDv7kRN ++ return 0 + local client_container=psmdb-client-bb8b97679-m8xkm + kubectl_bin exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.234.85:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.gHwKINCmeN ++ mktemp + local LAST_ERR=/tmp/tmp.FMTKLJ1h5Q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-m8xkm -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.234.85:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gHwKINCmeN + cat /tmp/tmp.FMTKLJ1h5Q + rm /tmp/tmp.gHwKINCmeN /tmp/tmp.FMTKLJ1h5Q + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.vRv2x1gZss/find + [[ node-port-rs0 == node\-port\-rs0 ]] + desc 'add service-per-pod label and annotation' + set +o xtrace ----------------------------------------------------------------------------------- add service-per-pod label and annotation ----------------------------------------------------------------------------------- ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q6qG1yyIbi +++ mktemp ++ local LAST_ERR=/tmp/tmp.csDaFVq1dn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q6qG1yyIbi ++ cat /tmp/tmp.csDaFVq1dn ++ rm /tmp/tmp.Q6qG1yyIbi /tmp/tmp.csDaFVq1dn ++ return 0 + old_node_port=31710 + kubectl_bin patch psmdb node-port --type=json --patch $'[\n\t\t{\n\t\t\t"op": "add",\n\t\t\t"path": "/spec/replsets/0/expose/annotations",\n\t\t\t"value": {\n\t\t\t\t"test": "service-per-pod",\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t"op": "add",\n\t\t\t"path": "/spec/replsets/0/expose/labels",\n\t\t\t"value": {\n\t\t\t\t"test": "service-per-pod",\n\t\t\t}\n\t\t}]' ++ mktemp + local LAST_OUT=/tmp/tmp.I7EfLRdb2L ++ mktemp + local LAST_ERR=/tmp/tmp.pILGnLRMx1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb node-port --type=json --patch $'[\n\t\t{\n\t\t\t"op": "add",\n\t\t\t"path": "/spec/replsets/0/expose/annotations",\n\t\t\t"value": {\n\t\t\t\t"test": "service-per-pod",\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t"op": "add",\n\t\t\t"path": "/spec/replsets/0/expose/labels",\n\t\t\t"value": {\n\t\t\t\t"test": "service-per-pod",\n\t\t\t}\n\t\t}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I7EfLRdb2L perconaservermongodb.psmdb.percona.com/node-port patched + cat /tmp/tmp.pILGnLRMx1 + rm /tmp/tmp.I7EfLRdb2L /tmp/tmp.pILGnLRMx1 + return 0 + sleep 5 + desc 'check if service created with expected config' + 
set +o xtrace ----------------------------------------------------------------------------------- check if service created with expected config ----------------------------------------------------------------------------------- + compare_kubectl service/node-port-rs0-0 -updated + local resource=service/node-port-rs0-0 + local postfix=-updated + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml + local new_result=/tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 + yq eval $'\n\t\t\tdel(.metadata.ownerReferences[].apiVersion) |\n\t\t\tdel(.metadata.managedFields) |\n\t\t\tdel(.. | select(has("creationTimestamp")).creationTimestamp) |\n\t\t\tdel(.. | select(has("namespace")).namespace) |\n\t\t\tdel(.. | select(has("uid")).uid) |\n\t\t\tdel(.metadata.resourceVersion) |\n\t\t\tdel(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |\n\t\t\tdel(.metadata.selfLink) |\n\t\t\tdel(.metadata.annotations."cloud.google.com/neg") |\n\t\t\tdel(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |\n\t\t\tdel(.. | select(has("image")).image) |\n\t\t\tdel(.. | select(has("clusterIP")).clusterIP) |\n\t\t\tdel(.. | select(has("clusterIPs")).clusterIPs) |\n\t\t\tdel(.. | select(has("dataSource")).dataSource) |\n\t\t\tdel(.. | select(has("procMount")).procMount) |\n\t\t\tdel(.. | select(has("storageClassName")).storageClassName) |\n\t\t\tdel(.. | select(has("finalizers")).finalizers) |\n\t\t\tdel(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |\n\t\t\tdel(.. | select(has("volumeName")).volumeName) |\n\t\t\tdel(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |\n\t\t\tdel(.spec.volumeMode) |\n\t\t\tdel(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |\n\t\t\tdel(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |\n\t\t\tdel(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |\n\t\t\tdel(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |\n\t\t\tdel(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |\n\t\t\tdel(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |\n\t\t\tdel(.. | select(has("nodePort")).nodePort) |\n\t\t\tdel(.status) |\n\t\t\t(.. | select(tag == "!!str")) |= sub("service-per-pod-110", "NAME_SPACE") |\n\t\t\tdel(.spec.volumeClaimTemplates[].apiVersion) |\n\t\t\tdel(.spec.volumeClaimTemplates[].kind) |\n\t\t\tdel(.spec.ipFamilies) |\n\t\t\tdel(.spec.ipFamilyPolicy) |\n\t\t\t(.. | select(. == "extensions/v1beta1")) = "apps/v1" |\n\t\t\t(.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.EpQbgsky5T ++ mktemp + local LAST_ERR=/tmp/tmp.Eq7oaLRDYi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/node-port-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EpQbgsky5T + cat /tmp/tmp.Eq7oaLRDYi + rm /tmp/tmp.EpQbgsky5T /tmp/tmp.Eq7oaLRDYi + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml /tmp/tmp.vRv2x1gZss/service_node-port-rs0-0.yml + log 'compare_kubectl: service/node-port-rs0-0 OK' + set +o xtrace [2026-04-23T10:16:29+0000] compare_kubectl: service/node-port-rs0-0 OK ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vmd271iUkE +++ mktemp ++ local LAST_ERR=/tmp/tmp.pixhPVDqbz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vmd271iUkE ++ cat /tmp/tmp.pixhPVDqbz ++ rm /tmp/tmp.vmd271iUkE /tmp/tmp.pixhPVDqbz ++ return 0 + current_node_port=31710 + [[ 31710 != 31710 ]] + desc 'delete PSMDB cluster node-port-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/node-port-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.v4PUMD2p59 ++ mktemp + local LAST_ERR=/tmp/tmp.mEsKT53nXT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/node-port-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.v4PUMD2p59 perconaservermongodb.psmdb.percona.com "node-port" deleted from service-per-pod-110 namespace + cat /tmp/tmp.mEsKT53nXT + rm /tmp/tmp.v4PUMD2p59 /tmp/tmp.mEsKT53nXT + return 0 + desc 'check Mongos in sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- check Mongos in sharded cluster ----------------------------------------------------------------------------------- + local cluster=some-name + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/e2e-tests/service-per-pod/conf/sharded.yml + yq eval '(.spec | select(.image == null)).image = 
"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2274-acb3b334"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-110/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.WUGvoSB1eg ++ mktemp + local LAST_ERR=/tmp/tmp.sC8jHD82DL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WUGvoSB1eg perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.sC8jHD82DL + rm /tmp/tmp.WUGvoSB1eg /tmp/tmp.sC8jHD82DL + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WaxIeexjJu +++ mktemp ++ local LAST_ERR=/tmp/tmp.Sbl6KhgpH3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WaxIeexjJu ++ cat /tmp/tmp.Sbl6KhgpH3 ++ rm /tmp/tmp.WaxIeexjJu /tmp/tmp.Sbl6KhgpH3 ++ return 0 + [[ '' == true ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yz3Xoakj3c +++ mktemp ++ local LAST_ERR=/tmp/tmp.69KH9Lpbgs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Yz3Xoakj3c ++ cat /tmp/tmp.69KH9Lpbgs ++ rm /tmp/tmp.Yz3Xoakj3c /tmp/tmp.69KH9Lpbgs ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0gyUaBMMFy +++ mktemp ++ local LAST_ERR=/tmp/tmp.tmXPgfOnTg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0gyUaBMMFy ++ cat /tmp/tmp.tmXPgfOnTg ++ rm /tmp/tmp.0gyUaBMMFy /tmp/tmp.tmXPgfOnTg ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness....................... 
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g6nET7eq2s +++ mktemp ++ local LAST_ERR=/tmp/tmp.xz5gItHnZ0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g6nET7eq2s ++ cat /tmp/tmp.xz5gItHnZ0 ++ rm /tmp/tmp.g6nET7eq2s /tmp/tmp.xz5gItHnZ0 ++ return 0 + [[ '' == true ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vgCu1bZoSV +++ mktemp ++ local LAST_ERR=/tmp/tmp.IbLgFbucIN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vgCu1bZoSV ++ cat /tmp/tmp.IbLgFbucIN ++ rm /tmp/tmp.vgCu1bZoSV /tmp/tmp.IbLgFbucIN ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E7fQoVZS5a +++ mktemp ++ local LAST_ERR=/tmp/tmp.SoMaKl4Elw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E7fQoVZS5a ++ cat /tmp/tmp.SoMaKl4Elw ++ rm /tmp/tmp.E7fQoVZS5a /tmp/tmp.SoMaKl4Elw ++ return 0 + [[ '' == true ]] + sleep 10 + [[ false == true ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.B2c3ag6UNH +++ mktemp ++ local LAST_ERR=/tmp/tmp.F2aRPHTkXT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ 
break ++ cat /tmp/tmp.B2c3ag6UNH ++ cat /tmp/tmp.F2aRPHTkXT ++ rm /tmp/tmp.B2c3ag6UNH /tmp/tmp.F2aRPHTkXT ++ return 0 + [[ '' == true ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2krUZe4Irh +++ mktemp ++ local LAST_ERR=/tmp/tmp.8QJQ3t2cFe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2krUZe4Irh ++ cat /tmp/tmp.8QJQ3t2cFe ++ rm /tmp/tmp.2krUZe4Irh /tmp/tmp.8QJQ3t2cFe ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.z2WOEeUWCf +++ mktemp ++ local LAST_ERR=/tmp/tmp.ngACIVBClF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.z2WOEeUWCf ++ cat /tmp/tmp.ngACIVBClF ++ rm /tmp/tmp.z2WOEeUWCf /tmp/tmp.ngACIVBClF ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + desc 'enabling servicePerPod for mongos' + set +o xtrace ----------------------------------------------------------------------------------- enabling servicePerPod for mongos ----------------------------------------------------------------------------------- + kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"mongos":{"expose":{"servicePerPod":true}}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xtev9l2az1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xMC3HCfMvK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xtev9l2az1 ++ cat /tmp/tmp.xMC3HCfMvK ++ rm /tmp/tmp.xtev9l2az1 /tmp/tmp.xMC3HCfMvK ++ return 0 + [[ '' == true ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ju36YyXC61 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qD1Tkp8vya ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set 
+e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ju36YyXC61 ++ cat /tmp/tmp.qD1Tkp8vya ++ rm /tmp/tmp.ju36YyXC61 /tmp/tmp.qD1Tkp8vya ++ return 0 + [[ '' == true ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u0qEWigup2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UkxZ49SiSU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u0qEWigup2 ++ cat /tmp/tmp.UkxZ49SiSU ++ rm /tmp/tmp.u0qEWigup2 /tmp/tmp.UkxZ49SiSU ++ return 0 + [[ '' == true ]] + sleep 10 + [[ true == true ]] + set +x Waiting for cluster readyness + check_service present some-name-mongos-0 + state=present + svc_name=some-name-mongos-0 + '[' present = present ']' + echo -n 'check that some-name-mongos-0 was created' check that some-name-mongos-0 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-0 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service present some-name-mongos-1 + state=present + svc_name=some-name-mongos-1 + '[' present = present ']' + echo -n 'check that some-name-mongos-1 was created' check that some-name-mongos-1 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-1 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service present some-name-mongos-2 + state=present + svc_name=some-name-mongos-2 + '[' present = present ']' + echo -n 'check that some-name-mongos-2 was created' check that some-name-mongos-2 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-2 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service removed some-name-mongos + state=removed + svc_name=some-name-mongos + '[' removed = present ']' + '[' removed = removed ']' + echo -n 'check that some-name-mongos was removed' check that some-name-mongos was removed++ kubectl_bin get service/some-name-mongos -o 'jsonpath={.spec.type}' ++ grep NotFound + [[ -z Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found ]] + echo .OK .OK + destroy service-per-pod-110 + local namespace=service-per-pod-110 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.PuENtYkLW1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.KJSrYzzoEQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ 
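
check_service covers both directions of the servicePerPod toggle for mongos: the per-pod Services some-name-mongos-0 through -2 must exist, and the shared some-name-mongos Service must be gone (the repeated NotFound errors in the removed branch are kubectl_bin retrying the lookup, which is exactly what that check wants to see). Reconstructed from the trace; the polling in each branch is assumed from the timeout variable rather than observed:

check_service() {
    local state=$1 svc_name=$2
    case "$state" in
        present)
            echo -n "check that $svc_name was created"
            # Wait until the Service lookup stops returning NotFound.
            until kubectl get "service/$svc_name" -o 'jsonpath={.spec.type}' 2>&1 \
                | grep -vq NotFound; do
                sleep 1
            done
            echo .OK
            ;;
        removed)
            echo -n "check that $svc_name was removed"
            # Wait until the lookup yields nothing but NotFound errors.
            until kubectl get "service/$svc_name" -o 'jsonpath={.spec.type}' 2>&1 \
                | grep -q NotFound; do
                sleep 1
            done
            echo .OK
            ;;
    esac
}
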
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.PuENtYkLW1
++ cat /tmp/tmp.KJSrYzzoEQ
No resources found in service-per-pod-110 namespace.
++ rm /tmp/tmp.PuENtYkLW1 /tmp/tmp.KJSrYzzoEQ
++ return 0
+ '[' 0 '!=' 0 ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.PfucX9JcpW
++ mktemp
+ local LAST_ERR=/tmp/tmp.wdgJ1K1HqC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.PfucX9JcpW
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.wdgJ1K1HqC
+ rm /tmp/tmp.PfucX9JcpW /tmp/tmp.wdgJ1K1HqC
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.4dKaK58zBz
++ mktemp
+ local LAST_ERR=/tmp/tmp.fWeIVkx5XC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.4dKaK58zBz
+ cat /tmp/tmp.fWeIVkx5XC
+ rm /tmp/tmp.4dKaK58zBz /tmp/tmp.fWeIVkx5XC
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.yTCzFMZAME
++ mktemp
+ local LAST_ERR=/tmp/tmp.iD2b0Pk1DL
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.yTCzFMZAME
+ cat /tmp/tmp.iD2b0Pk1DL
+ rm /tmp/tmp.yTCzFMZAME /tmp/tmp.iD2b0Pk1DL
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.T1Ob2ZmT5D
++ mktemp
+ local LAST_ERR=/tmp/tmp.XmWwIQLNwk
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.T1Ob2ZmT5D
+ cat /tmp/tmp.XmWwIQLNwk
+ rm /tmp/tmp.T1Ob2ZmT5D /tmp/tmp.XmWwIQLNwk
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.onQqjmjXcc
++ mktemp
+ local LAST_ERR=/tmp/tmp.LXg4XNXc45
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2274/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.onQqjmjXcc
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.LXg4XNXc45
+ rm /tmp/tmp.onQqjmjXcc /tmp/tmp.LXg4XNXc45
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.PNkwPi4s1B
++ mktemp
+ local LAST_ERR=/tmp/tmp.Vpab0AnQj8
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.PNkwPi4s1B
namespace "cert-manager" deleted
customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted
serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace
serviceaccount "cert-manager" deleted from cert-manager namespace
serviceaccount "cert-manager-webhook" deleted from cert-manager namespace
clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
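One subtlety in the delete_crd sequence above: before waiting for each CRD to disappear, the script strips metadata.finalizers from any leftover custom resources, since a CR stuck on a finalizer would block CRD deletion indefinitely; the "server doesn't have a resource type" errors here simply mean no such resources remained. The same step in standalone form, condensed from the trace for one of the three CRDs:

    # For every leftover backup CR, xargs feeds "<namespace> <name>" into
    # sh as $0 and $1, and the merge patch clears the finalizer list.
    kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
        || :    # tolerate the error once the CRD is already gone
    kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com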
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace service "cert-manager-cainjector" deleted from cert-manager namespace service "cert-manager" deleted from cert-manager namespace service "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.Vpab0AnQj8 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.PNkwPi4s1B + cat /tmp/tmp.Vpab0AnQj8 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.PNkwPi4s1B + cat /tmp/tmp.Vpab0AnQj8 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found 
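The remainder of this section is the same NotFound listing repeated: the first delete pass already removed most cert-manager objects, so both retries necessarily fail, and the captured stderr is dumped after each attempt and once more before the wrapper gives up (the script then swallows the non-zero status, visible below as "+ true"). A delete that tolerates already-missing objects would keep these retries quiet, for example (an alternative, not what the script currently does):

    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml \
        --ignore-not-found --wait=false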
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.PNkwPi4s1B + cat /tmp/tmp.Vpab0AnQj8 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.PNkwPi4s1B /tmp/tmp.Vpab0AnQj8 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace service-per-pod-110 + rm -rf /tmp/tmp.vRv2x1gZss + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.YwNidItONc + local LAST_OUT=/tmp/tmp.o2LPRIK2bZ ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.2trS2J7MYS + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.bR6f2rZIbZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace service-per-pod-110
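A closing note on reading this log: nearly every command runs through the kubectl_bin wrapper, whose shape can be read directly off the trace: capture stdout and stderr into mktemp files, retry up to three times with a growing back-off (sleep 0, 4, 8), print whatever was captured, and propagate the final exit status. A minimal reconstruction (the actual helper in e2e-tests/functions may differ):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break
            # on failure, show what happened, then back off: 0s, 4s, 8s
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((i * timeout))
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }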