Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/logs/service-per-pod.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ main
+ create_infra service-per-pod-27363
+ local ns=service-per-pod-27363
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.9UrGR6aws1
++ mktemp
+ local LAST_ERR=/tmp/tmp.78mQWoNEtK
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.9UrGR6aws1
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.78mQWoNEtK
+ rm /tmp/tmp.9UrGR6aws1 /tmp/tmp.78mQWoNEtK
+ return 0
++ grep -v '\-\-\-'
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.BDHxW3eU70
++ mktemp
+ local LAST_ERR=/tmp/tmp.hf56XukmeK
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.BDHxW3eU70
+ cat /tmp/tmp.hf56XukmeK
+ rm /tmp/tmp.BDHxW3eU70 /tmp/tmp.hf56XukmeK
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.ZOafNlPS6J
++ mktemp
+ local LAST_ERR=/tmp/tmp.p8LkjMzJPg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ZOafNlPS6J
+ cat /tmp/tmp.p8LkjMzJPg
+ rm /tmp/tmp.ZOafNlPS6J /tmp/tmp.p8LkjMzJPg
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.719i0jSSrx
++ mktemp
+ local LAST_ERR=/tmp/tmp.CCx7PCRBia
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.719i0jSSrx
+ cat /tmp/tmp.CCx7PCRBia
+ rm /tmp/tmp.719i0jSSrx /tmp/tmp.CCx7PCRBia
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.JB4ZS3DyAH
++ mktemp
+ local LAST_ERR=/tmp/tmp.VCI1Fr7wMZ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.JB4ZS3DyAH
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.VCI1Fr7wMZ
+ rm /tmp/tmp.JB4ZS3DyAH /tmp/tmp.VCI1Fr7wMZ
+ return 0
+ check_crd_for_deletion PR-2285-ace12b60
+ local git_tag=PR-2285-ace12b60
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2285-ace12b60/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/sbin/sed s/---//g
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g')
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.K8VjTaIaCF
+++ mktemp
++ local LAST_ERR=/tmp/tmp.3Tji8RvqSd
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.K8VjTaIaCF
++ cat /tmp/tmp.3Tji8RvqSd
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.K8VjTaIaCF
++ cat /tmp/tmp.3Tji8RvqSd
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.K8VjTaIaCF
++ cat /tmp/tmp.3Tji8RvqSd
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.K8VjTaIaCF
++ cat /tmp/tmp.3Tji8RvqSd
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.K8VjTaIaCF /tmp/tmp.3Tji8RvqSd
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ xargs kubectl delete ns
+ set +o xtrace
++ mktemp
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.FfEx2NuSBz
++ mktemp
+ local LAST_OUT=/tmp/tmp.TGrVk6scep
++ mktemp
+ local LAST_ERR=/tmp/tmp.n8tktIpXwl
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.kW3oQnWPL7
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.FfEx2NuSBz
+ cat /tmp/tmp.n8tktIpXwl
+ rm /tmp/tmp.FfEx2NuSBz /tmp/tmp.n8tktIpXwl
+ return 0
namespace "cert-manager" deleted
namespace "service-per-pod-13487" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.TGrVk6scep
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.kW3oQnWPL7
+ rm /tmp/tmp.TGrVk6scep /tmp/tmp.kW3oQnWPL7
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.BhUutXicIA
++ mktemp
+ local LAST_ERR=/tmp/tmp.THt69bn4bm
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.BhUutXicIA
+ cat /tmp/tmp.THt69bn4bm
+ rm /tmp/tmp.BhUutXicIA /tmp/tmp.THt69bn4bm
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.4G0bDq9ltK
++ mktemp
+ local LAST_ERR=/tmp/tmp.BJMb3UE41N
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.4G0bDq9ltK
namespace/psmdb-operator created
+ cat /tmp/tmp.BJMb3UE41N
+ rm /tmp/tmp.4G0bDq9ltK /tmp/tmp.BJMb3UE41N
+ return 0
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.6kRfETxbC0
+++ mktemp
++ local LAST_ERR=/tmp/tmp.mIdtqChdGs
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.6kRfETxbC0
++ cat /tmp/tmp.mIdtqChdGs
++ rm /tmp/tmp.6kRfETxbC0 /tmp/tmp.mIdtqChdGs
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2285-ace12b60-1-cluster7 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.cI2hQtgId5
++ mktemp
+ local LAST_ERR=/tmp/tmp.A9ok4excRF
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2285-ace12b60-1-cluster7 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.cI2hQtgId5
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2285-ace12b60-1-cluster7" modified.
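# Every kubectl_bin trace above follows the same retry-wrapper shape: capture
# stdout/stderr into mktemp files, retry the kubectl call up to three times
# with growing sleeps, then cat the captured output and clean up. A minimal
# sketch of that pattern, reconstructed from the trace alone (the real helper
# lives in the repo's e2e-tests scripts; the exact failure test and output
# handling here are assumptions):
kubectl_bin() {
	local LAST_OUT LAST_ERR exit_status=0 timeout=4
	LAST_OUT=$(mktemp)
	LAST_ERR=$(mktemp)
	for i in $(seq 0 2); do
		set +e
		kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
		exit_status=$?
		set -e
		[ "$exit_status" -eq 0 ] && break   # traced as: '[' 0 '!=' 0 -a -n 1 ']'
		cat "$LAST_OUT"
		cat "$LAST_ERR"
		sleep $((timeout * i))              # observed above as: sleep 0, sleep 4, sleep 8
	done
	cat "$LAST_OUT"
	cat "$LAST_ERR"
	rm "$LAST_OUT" "$LAST_ERR"
	return "$exit_status"
}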
+ cat /tmp/tmp.A9ok4excRF
+ rm /tmp/tmp.cI2hQtgId5 /tmp/tmp.A9ok4excRF
+ return 0
+ deploy_operator
+ desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.6OPjw59Aq4
++ mktemp
+ local LAST_ERR=/tmp/tmp.3Eqj3aEbd9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6OPjw59Aq4
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.3Eqj3aEbd9
+ rm /tmp/tmp.6OPjw59Aq4 /tmp/tmp.3Eqj3aEbd9
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.Ye69IgM0gX
++ mktemp
+ local LAST_ERR=/tmp/tmp.ln9NsEfGYE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Ye69IgM0gX
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.ln9NsEfGYE
+ rm /tmp/tmp.Ye69IgM0gX /tmp/tmp.ln9NsEfGYE
+ return 0
+ yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/cw-operator.yaml
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.5oWEwpW9Zg
++ mktemp
+ local LAST_ERR=/tmp/tmp.ltF38nKflt
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.5oWEwpW9Zg
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.ltF38nKflt
+ rm /tmp/tmp.5oWEwpW9Zg /tmp/tmp.ltF38nKflt
+ return 0
+ sleep 20
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Igr86kXmhH
+++ mktemp
++ local LAST_ERR=/tmp/tmp.HYCCHX8gDx
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Igr86kXmhH
++ cat /tmp/tmp.HYCCHX8gDx
++ rm /tmp/tmp.Igr86kXmhH /tmp/tmp.HYCCHX8gDx
++ return 0
+ wait_operator_pod percona-server-mongodb-operator-c46d8d6c4-6wgf6
+ local pod=percona-server-mongodb-operator-c46d8d6c4-6wgf6
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-c46d8d6c4-6wgf6 to be ready.OK
+ echo 'Print operator info from log'
Print operator info from log
+ grep 'Manager starting up'
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.QLsrHDLd7w
+++ mktemp
++ local LAST_ERR=/tmp/tmp.p7OjOO20bm
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.QLsrHDLd7w
++ cat /tmp/tmp.p7OjOO20bm
++ rm /tmp/tmp.QLsrHDLd7w /tmp/tmp.p7OjOO20bm
++ return 0
+ kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-c46d8d6c4-6wgf6
++ mktemp
+ local LAST_OUT=/tmp/tmp.V7IldM4TQ5
++ mktemp
+ local LAST_ERR=/tmp/tmp.PAuZHLL42j
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl logs -n psmdb-operator percona-server-mongodb-operator-c46d8d6c4-6wgf6
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.V7IldM4TQ5
+ cat /tmp/tmp.PAuZHLL42j
+ rm /tmp/tmp.V7IldM4TQ5 /tmp/tmp.PAuZHLL42j
+ return 0
2026-03-15T19:12:10.134Z INFO setup Manager starting up {"gitCommit": "ace12b605f36447907325d565c954115594804c6", "gitBranch": "PR-2285-ace12b60", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"}
+ create_namespace service-per-pod-27363
+ local namespace=service-per-pod-27363
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces service-per-pod-27363'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces service-per-pod-27363
-----------------------------------------------------------------------------------
+ xargs kubectl delete ns
+ kubectl_bin delete namespace service-per-pod-27363 --ignore-not-found
+ awk '{print$1}'
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.45CiR3gB4U
++ mktemp
+ local LAST_OUT=/tmp/tmp.t9nEg5Bu5n
+ local LAST_ERR=/tmp/tmp.spDFZHYSRl
+ local exit_status=0
+ local timeout=4
++ mktemp
++ seq 0 2
+ local LAST_ERR=/tmp/tmp.k1HX7GuCQc
+ local exit_status=0
+ local timeout=4
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace service-per-pod-27363 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.45CiR3gB4U
+ cat /tmp/tmp.spDFZHYSRl
+ rm /tmp/tmp.45CiR3gB4U /tmp/tmp.spDFZHYSRl
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.t9nEg5Bu5n
+ cat /tmp/tmp.k1HX7GuCQc
+ rm /tmp/tmp.t9nEg5Bu5n /tmp/tmp.k1HX7GuCQc
+ return 0
+ kubectl_bin wait --for=delete namespace service-per-pod-27363
++ mktemp
+ local LAST_OUT=/tmp/tmp.xkQiHOH0E2
++ mktemp
+ local LAST_ERR=/tmp/tmp.NMm3rjKhfR
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace service-per-pod-27363
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.xkQiHOH0E2
+ cat /tmp/tmp.NMm3rjKhfR
+ rm /tmp/tmp.xkQiHOH0E2 /tmp/tmp.NMm3rjKhfR
+ return 0
+ desc 'create namespace service-per-pod-27363'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace service-per-pod-27363
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace service-per-pod-27363
++ mktemp
+ local LAST_OUT=/tmp/tmp.QkE6XhIveL
++ mktemp
+ local LAST_ERR=/tmp/tmp.yiWTqcHVxS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace service-per-pod-27363
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.QkE6XhIveL
namespace/service-per-pod-27363 created
+ cat /tmp/tmp.yiWTqcHVxS
+ rm /tmp/tmp.QkE6XhIveL /tmp/tmp.yiWTqcHVxS
+ return 0
+ set_kube_ctx service-per-pod-27363
+ local namespace=service-per-pod-27363
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ISDAImBxSQ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.OWxbs1ke0h
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ISDAImBxSQ
++ cat /tmp/tmp.OWxbs1ke0h
++ rm /tmp/tmp.ISDAImBxSQ /tmp/tmp.OWxbs1ke0h
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2285-ace12b60-1-cluster7 --namespace=service-per-pod-27363
++ mktemp
+ local LAST_OUT=/tmp/tmp.ZnCrFCFF0Y
++ mktemp
+ local LAST_ERR=/tmp/tmp.Sh9N2Lq935
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2285-ace12b60-1-cluster7 --namespace=service-per-pod-27363
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ZnCrFCFF0Y
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2285-ace12b60-1-cluster7" modified.
+ cat /tmp/tmp.Sh9N2Lq935
+ rm /tmp/tmp.ZnCrFCFF0Y /tmp/tmp.Sh9N2Lq935
+ return 0
+ deploy_cert_manager
+ desc 'deploy cert manager'
+ set +o xtrace
-----------------------------------------------------------------------------------
deploy cert manager
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace cert-manager
++ mktemp
+ local LAST_OUT=/tmp/tmp.o4PDjlCgJc
++ mktemp
+ local LAST_ERR=/tmp/tmp.PtZ8TETs5E
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace cert-manager
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.o4PDjlCgJc
namespace/cert-manager created
+ cat /tmp/tmp.PtZ8TETs5E
+ rm /tmp/tmp.o4PDjlCgJc /tmp/tmp.PtZ8TETs5E
+ return 0
+ kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true
++ mktemp
+ local LAST_OUT=/tmp/tmp.fl6cGu1zjM
++ mktemp
+ local LAST_ERR=/tmp/tmp.GONHgQdP2d
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.fl6cGu1zjM
namespace/cert-manager labeled
+ cat /tmp/tmp.GONHgQdP2d
+ rm /tmp/tmp.fl6cGu1zjM /tmp/tmp.GONHgQdP2d
+ return 0
+ kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.8WnhGsVtyp
++ mktemp
+ local LAST_ERR=/tmp/tmp.uW5HJp90Yz
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8WnhGsVtyp
namespace/cert-manager configured
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged
serviceaccount/cert-manager-cainjector created
serviceaccount/cert-manager created
serviceaccount/cert-manager-webhook created
clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager-tokenrequest created
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager-cainjector created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
+ cat /tmp/tmp.uW5HJp90Yz
Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
+ rm /tmp/tmp.8WnhGsVtyp /tmp/tmp.uW5HJp90Yz
+ return 0
+ kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
++ mktemp
+ local LAST_OUT=/tmp/tmp.6EcheazkJg
++ mktemp
+ local LAST_ERR=/tmp/tmp.V8LmUpHbW9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6EcheazkJg
pod/cert-manager-559d798845-d9l66 condition met
pod/cert-manager-cainjector-64958d9c7c-qd92r condition met
pod/cert-manager-webhook-7fb6f99b56-gmp22 condition met
+ cat /tmp/tmp.V8LmUpHbW9
+ rm /tmp/tmp.6EcheazkJg /tmp/tmp.V8LmUpHbW9
+ return 0
+ sleep 120
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.k0qTBbygqc
++ mktemp
+ local LAST_ERR=/tmp/tmp.k66XB2Svzx
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/conf/client.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.k0qTBbygqc
deployment.apps/psmdb-client created
secret/some-users created
+ cat /tmp/tmp.k66XB2Svzx
+ rm /tmp/tmp.k0qTBbygqc /tmp/tmp.k66XB2Svzx
+ return 0
+ desc 'check ClusterIP'
+ set +o xtrace
-----------------------------------------------------------------------------------
check ClusterIP
-----------------------------------------------------------------------------------
+ check_cr_config cluster-ip-rs0
+ local cluster=cluster-ip-rs0
+ desc 'create PSMDB cluster cluster-ip-rs0'
+ set +o xtrace
-----------------------------------------------------------------------------------
create PSMDB cluster cluster-ip-rs0
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml
+ yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"'
++ mktemp
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"'
+ /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-27363/g
+ local LAST_OUT=/tmp/tmp.yTekQURH3U
+ yq eval '.spec.upgradeOptions.apply="Never"'
++ mktemp
+ local LAST_ERR=/tmp/tmp.HfKL5Mp5DU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.yTekQURH3U
perconaservermongodb.psmdb.percona.com/cluster-ip created
+ cat /tmp/tmp.HfKL5Mp5DU
+ rm /tmp/tmp.yTekQURH3U /tmp/tmp.HfKL5Mp5DU
+ return 0
+ desc 'check if all 3 Pods started'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
+ wait_for_running cluster-ip-rs0 3 false
+ local name=cluster-ip-rs0
+ let last_pod=2
+ local check_cluster_readyness=false
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=cluster-ip
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod cluster-ip-rs0-0
+ local pod=cluster-ip-rs0-0
+ set +o xtrace
waiting for pod/cluster-ip-rs0-0 to be ready......OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod cluster-ip-rs0-1
+ local pod=cluster-ip-rs0-1
+ set +o xtrace
waiting for pod/cluster-ip-rs0-1 to be ready.......OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.XhbYCnQ59s
+++ mktemp
++ local LAST_ERR=/tmp/tmp.TZGfMLNirF
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.XhbYCnQ59s
++ cat /tmp/tmp.TZGfMLNirF
++ rm /tmp/tmp.XhbYCnQ59s /tmp/tmp.TZGfMLNirF
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod cluster-ip-rs0-2
+ local pod=cluster-ip-rs0-2
+ set +o xtrace
waiting for pod/cluster-ip-rs0-2 to be ready..................OK
++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.FDcU0ICBMs
+++ mktemp
++ local LAST_ERR=/tmp/tmp.W5NkadMGqS
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.FDcU0ICBMs
++ cat /tmp/tmp.W5NkadMGqS
++ rm /tmp/tmp.FDcU0ICBMs /tmp/tmp.W5NkadMGqS
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.8bRWFtkAp1
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Ycpa3ah8B3
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb cluster-ip -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.8bRWFtkAp1
++ cat /tmp/tmp.Ycpa3ah8B3
++ rm /tmp/tmp.8bRWFtkAp1 /tmp/tmp.Ycpa3ah8B3
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ false == \t\r\u\e ]]
+ desc 'check if service and statefulset created with expected config'
+ set +o xtrace
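# The apply_cluster/cat_config step traced above builds the final CR by piping
# the test's conf file through a chain of yq edits before kubectl apply. The
# yq expressions and image tags below are copied from the trace; $conf and
# $namespace stand in for the helper's arguments and are assumptions:
cat "$conf" \
	| yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
	| yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' \
	| yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60"' \
	| yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' \
	| yq eval '.spec.upgradeOptions.apply="Never"' \
	| sed -e "s/NAME_SPACE/$namespace/g" \
	| kubectl apply -f -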
----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/cluster-ip-rs0 + local resource=statefulset/cluster-ip-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml + local new_result=/tmp/tmp.WHbJ0yO9r7/statefulset_cluster-ip-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/cluster-ip-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-27363", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.H9RsPCLnlj ++ mktemp + local LAST_ERR=/tmp/tmp.ULGINPdSth + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/cluster-ip-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.H9RsPCLnlj + cat /tmp/tmp.ULGINPdSth + rm /tmp/tmp.H9RsPCLnlj /tmp/tmp.ULGINPdSth + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.WHbJ0yO9r7/statefulset_cluster-ip-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.WHbJ0yO9r7/statefulset_cluster-ip-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.WHbJ0yO9r7/statefulset_cluster-ip-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_cluster-ip-rs0.yml /tmp/tmp.WHbJ0yO9r7/statefulset_cluster-ip-rs0.yml + log 'compare_kubectl: statefulset/cluster-ip-rs0 OK' + set +o xtrace [2026-03-15T19:17:02+0000] compare_kubectl: statefulset/cluster-ip-rs0 OK + compare_kubectl service/cluster-ip-rs0-0 + local resource=service/cluster-ip-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml + local new_result=/tmp/tmp.WHbJ0yO9r7/service_cluster-ip-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/cluster-ip-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-27363", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.kNczl4nCoK ++ mktemp + local LAST_ERR=/tmp/tmp.vKq7hRJDIF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/cluster-ip-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kNczl4nCoK + cat /tmp/tmp.vKq7hRJDIF + rm /tmp/tmp.kNczl4nCoK /tmp/tmp.vKq7hRJDIF + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.WHbJ0yO9r7/service_cluster-ip-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.WHbJ0yO9r7/service_cluster-ip-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.WHbJ0yO9r7/service_cluster-ip-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_cluster-ip-rs0-0.yml /tmp/tmp.WHbJ0yO9r7/service_cluster-ip-rs0-0.yml + log 'compare_kubectl: service/cluster-ip-rs0-0 OK' + set +o xtrace [2026-03-15T19:17:03+0000] compare_kubectl: service/cluster-ip-rs0-0 OK ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kRjDYWHyqQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UNZjVRqd25 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kRjDYWHyqQ +++ cat /tmp/tmp.UNZjVRqd25 +++ rm /tmp/tmp.kRjDYWHyqQ /tmp/tmp.UNZjVRqd25 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6TLqpm8FEe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Bec4I70MGy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6TLqpm8FEe +++ cat /tmp/tmp.Bec4I70MGy +++ rm /tmp/tmp.6TLqpm8FEe /tmp/tmp.Bec4I70MGy +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vHvFdN7oal +++ mktemp ++ local LAST_ERR=/tmp/tmp.bLEeTCJB5F ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vHvFdN7oal ++ cat /tmp/tmp.bLEeTCJB5F ++ rm /tmp/tmp.vHvFdN7oal /tmp/tmp.bLEeTCJB5F ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YFG1oOeujk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HfoO7L4xeP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YFG1oOeujk +++ cat /tmp/tmp.HfoO7L4xeP +++ rm /tmp/tmp.YFG1oOeujk /tmp/tmp.HfoO7L4xeP +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iOYI2FrS0k ++++ mktemp +++ local LAST_ERR=/tmp/tmp.W7JAMkSY6I +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.iOYI2FrS0k +++ cat /tmp/tmp.W7JAMkSY6I +++ rm /tmp/tmp.iOYI2FrS0k /tmp/tmp.W7JAMkSY6I +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Hgq5swTzgn +++ mktemp ++ local LAST_ERR=/tmp/tmp.AqpUGeWjtI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Hgq5swTzgn ++ cat /tmp/tmp.AqpUGeWjtI ++ rm /tmp/tmp.Hgq5swTzgn /tmp/tmp.AqpUGeWjtI ++ return 0 ++ return ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WypWnhdhEi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aW8ilJ9cxU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WypWnhdhEi +++ cat /tmp/tmp.aW8ilJ9cxU +++ rm /tmp/tmp.WypWnhdhEi /tmp/tmp.aW8ilJ9cxU +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NgkJe8Fw25 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xUx7URB29q +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NgkJe8Fw25 +++ cat /tmp/tmp.xUx7URB29q +++ rm /tmp/tmp.NgkJe8Fw25 /tmp/tmp.xUx7URB29q +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.whlXZob1wG +++ mktemp ++ local LAST_ERR=/tmp/tmp.tO5tdMlXwi ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.whlXZob1wG ++ cat /tmp/tmp.tO5tdMlXwi ++ rm /tmp/tmp.whlXZob1wG /tmp/tmp.tO5tdMlXwi ++ return 0 ++ return + local URI=34.118.232.95,34.118.232.254,34.118.237.35 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.232.95,34.118.232.254,34.118.237.35 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.232.95,34.118.232.254,34.118.237.35 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@34.118.232.95,34.118.232.254,34.118.237.35 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.G5LpTTtMx0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YJwqtiz4hB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.G5LpTTtMx0 ++ cat /tmp/tmp.YJwqtiz4hB ++ rm /tmp/tmp.G5LpTTtMx0 /tmp/tmp.YJwqtiz4hB ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.232.95,34.118.232.254,34.118.237.35:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BM4mBvZetM ++ mktemp + local LAST_ERR=/tmp/tmp.CuSU75900F + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.232.95,34.118.232.254,34.118.237.35:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BM4mBvZetM Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.232.95:27017,34.118.232.254:27017,34.118.237.35:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("14a65d6c-ff73-4aec-b6d5-47e18fd4e412") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.CuSU75900F + rm /tmp/tmp.BM4mBvZetM /tmp/tmp.CuSU75900F + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@34.118.232.95,34.118.232.254,34.118.237.35 mongodb 
:27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.232.95,34.118.232.254,34.118.237.35 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.232.95,34.118.232.254,34.118.237.35 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2E0yqodTyZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.TjTVH15CP4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2E0yqodTyZ ++ cat /tmp/tmp.TjTVH15CP4 ++ rm /tmp/tmp.2E0yqodTyZ /tmp/tmp.TjTVH15CP4 ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.232.95,34.118.232.254,34.118.237.35:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.pe8O4y9aKW ++ mktemp + local LAST_ERR=/tmp/tmp.tj1IgbVG4C + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.232.95,34.118.232.254,34.118.237.35:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.pe8O4y9aKW Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.232.95:27017,34.118.232.254:27017,34.118.237.35:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("4157d012-a99c-4ecc-8ee7-5e86ba6ac9c2") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tj1IgbVG4C + rm /tmp/tmp.pe8O4y9aKW /tmp/tmp.tj1IgbVG4C + return 0 + sleep 30 ++ get_service_ip cluster-ip-rs0-0 ++ local service=cluster-ip-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jTytccyBKh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3i65pHn0ej +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jTytccyBKh +++ cat /tmp/tmp.3i65pHn0ej +++ rm /tmp/tmp.jTytccyBKh /tmp/tmp.3i65pHn0ej +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qqLk7jjz0H ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tiBJDfRmN5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qqLk7jjz0H +++ cat /tmp/tmp.tiBJDfRmN5 +++ rm /tmp/tmp.qqLk7jjz0H /tmp/tmp.tiBJDfRmN5 +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-0 -o 
'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E7noIpkD1q +++ mktemp ++ local LAST_ERR=/tmp/tmp.R1CmpnNPF0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E7noIpkD1q ++ cat /tmp/tmp.R1CmpnNPF0 ++ rm /tmp/tmp.E7noIpkD1q /tmp/tmp.R1CmpnNPF0 ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.232.95 '' :27017 + local command=find + local uri=myApp:myPass@34.118.232.95 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:18:34+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.232.95 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.232.95 + local driver=mongodb + local suffix=:27017 + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.232.95 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lsZSZTG1lh +++ mktemp ++ local LAST_ERR=/tmp/tmp.08yU5xPCg4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lsZSZTG1lh ++ cat /tmp/tmp.08yU5xPCg4 ++ rm /tmp/tmp.lsZSZTG1lh /tmp/tmp.08yU5xPCg4 ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.95:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.uRw3RSKBG1 ++ mktemp + local LAST_ERR=/tmp/tmp.qNC45nfqkz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.95:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uRw3RSKBG1 + cat /tmp/tmp.qNC45nfqkz + rm /tmp/tmp.uRw3RSKBG1 /tmp/tmp.qNC45nfqkz + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find ++ get_service_ip cluster-ip-rs0-1 ++ local service=cluster-ip-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rw1AIbHAe3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5AWVNzomw9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 
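
[annotation] Before find output is compared to the golden file it is scrubbed so the result is deterministic: a grep -E -v drops shell and network chatter, and sed blanks ObjectId values and pins the namespace suffix. The two filters, exactly as the trace runs them:

    grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' \
      | /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/'
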
'!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rw1AIbHAe3 +++ cat /tmp/tmp.5AWVNzomw9 +++ rm /tmp/tmp.rw1AIbHAe3 /tmp/tmp.5AWVNzomw9 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cdF0smnPPo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.r4QwRfeWJd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cdF0smnPPo +++ cat /tmp/tmp.r4QwRfeWJd +++ rm /tmp/tmp.cdF0smnPPo /tmp/tmp.r4QwRfeWJd +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kcRXyPrj6M +++ mktemp ++ local LAST_ERR=/tmp/tmp.JQ4KYTE9cI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kcRXyPrj6M ++ cat /tmp/tmp.JQ4KYTE9cI ++ rm /tmp/tmp.kcRXyPrj6M /tmp/tmp.JQ4KYTE9cI ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.232.254 '' :27017 + local command=find + local uri=myApp:myPass@34.118.232.254 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:18:40+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.232.254 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.232.254 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.232.254 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vfHJ0qO6pg +++ mktemp ++ local LAST_ERR=/tmp/tmp.p7r5QZ4znx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vfHJ0qO6pg ++ cat /tmp/tmp.p7r5QZ4znx ++ rm /tmp/tmp.vfHJ0qO6pg /tmp/tmp.p7r5QZ4znx ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.254:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.84CnwHSlSf ++ mktemp + local LAST_ERR=/tmp/tmp.ESEBjMPoOZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | 
mongo mongodb://myApp:myPass@34.118.232.254:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.84CnwHSlSf + cat /tmp/tmp.ESEBjMPoOZ + rm /tmp/tmp.84CnwHSlSf /tmp/tmp.ESEBjMPoOZ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find ++ get_service_ip cluster-ip-rs0-2 ++ local service=cluster-ip-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6bhAGYNrI6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uRYN2jDI1B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/cluster-ip -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6bhAGYNrI6 +++ cat /tmp/tmp.uRYN2jDI1B +++ rm /tmp/tmp.6bhAGYNrI6 /tmp/tmp.uRYN2jDI1B +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pbprFNGgJi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8BpMGyg6yt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pbprFNGgJi +++ cat /tmp/tmp.8BpMGyg6yt +++ rm /tmp/tmp.pbprFNGgJi /tmp/tmp.8BpMGyg6yt +++ return 0 ++ service_type=ClusterIP ++ '[' ClusterIP = ClusterIP ']' ++ kubectl_bin get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VuFOktls2Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.aUuC3kDobN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/cluster-ip-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VuFOktls2Y ++ cat /tmp/tmp.aUuC3kDobN ++ rm /tmp/tmp.VuFOktls2Y /tmp/tmp.aUuC3kDobN ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.237.35 '' :27017 + local command=find + local uri=myApp:myPass@34.118.237.35 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:18:47+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.237.35 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.237.35 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.237.35 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Iv22LiHWqu +++ mktemp ++ local 
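
[annotation] compare_mongo_cmd ties those pieces together: run db.test.find() through run_mongo against one per-pod address, scrub the output, store it under the test's temp dir, and diff -u it against the checked-in expectation. File locations are taken from the trace; the function body is a reconstruction:

    compare_mongo_cmd() {
        local command=$1 uri=$2
        local expected="$test_dir/compare/${command}.json"   # e.g. compare/find.json
        local actual="$tmp_dir/${command}"                   # e.g. /tmp/tmp.WHbJ0yO9r7/find
        run_mongo "use myApp\n db.test.${command}()" "$uri" mongodb :27017 \
            | scrub > "$actual"       # scrub = stand-in name for the grep/sed pair above
        diff -u "$expected" "$actual" # any difference fails the test
    }
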
LAST_ERR=/tmp/tmp.am3vMGF5tO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Iv22LiHWqu ++ cat /tmp/tmp.am3vMGF5tO ++ rm /tmp/tmp.Iv22LiHWqu /tmp/tmp.am3vMGF5tO ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.237.35:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.OTOWxLHcYB ++ mktemp + local LAST_ERR=/tmp/tmp.CNnmlpvGue + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.237.35:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OTOWxLHcYB + cat /tmp/tmp.CNnmlpvGue + rm /tmp/tmp.OTOWxLHcYB /tmp/tmp.CNnmlpvGue + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find + [[ cluster-ip-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'delete PSMDB cluster cluster-ip-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster cluster-ip-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.90ZE7BAh5q ++ mktemp + local LAST_ERR=/tmp/tmp.Qs7hyZcIbk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/cluster-ip-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.90ZE7BAh5q perconaservermongodb.psmdb.percona.com "cluster-ip" deleted from service-per-pod-27363 namespace + cat /tmp/tmp.Qs7hyZcIbk + rm /tmp/tmp.90ZE7BAh5q /tmp/tmp.Qs7hyZcIbk + return 0 + desc 'check LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- check LoadBalancer ----------------------------------------------------------------------------------- + check_cr_config local-balancer-rs0 + local cluster=local-balancer-rs0 + desc 'create PSMDB cluster local-balancer-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | 
select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60"' + /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-27363/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.1rrwmLdum7 ++ mktemp + local LAST_ERR=/tmp/tmp.nE4kGkKizP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1rrwmLdum7 perconaservermongodb.psmdb.percona.com/local-balancer created + cat /tmp/tmp.nE4kGkKizP + rm /tmp/tmp.1rrwmLdum7 /tmp/tmp.nE4kGkKizP + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running local-balancer-rs0 3 false + local name=local-balancer-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=local-balancer ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod local-balancer-rs0-0 + local pod=local-balancer-rs0-0 + set +o xtrace waiting for pod/local-balancer-rs0-0 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod local-balancer-rs0-1 + local pod=local-balancer-rs0-1 + set +o xtrace waiting for pod/local-balancer-rs0-1 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OwdUOd6Xif +++ mktemp ++ local LAST_ERR=/tmp/tmp.8Hy9FZjgzW ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OwdUOd6Xif ++ cat /tmp/tmp.8Hy9FZjgzW ++ rm /tmp/tmp.OwdUOd6Xif /tmp/tmp.8Hy9FZjgzW ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod local-balancer-rs0-2 + local pod=local-balancer-rs0-2 + set +o xtrace waiting for pod/local-balancer-rs0-2 to be ready......OK ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y5NvYOz7bM +++ mktemp ++ local LAST_ERR=/tmp/tmp.qPIzWcpeet ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.y5NvYOz7bM ++ cat /tmp/tmp.qPIzWcpeet ++ rm /tmp/tmp.y5NvYOz7bM /tmp/tmp.qPIzWcpeet ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bZjJKdrGXz +++ mktemp ++ local LAST_ERR=/tmp/tmp.FuurcX36AC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb local-balancer -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.bZjJKdrGXz ++ cat /tmp/tmp.FuurcX36AC ++ rm /tmp/tmp.bZjJKdrGXz /tmp/tmp.FuurcX36AC ++ return 0 + 
[[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/local-balancer-rs0 + local resource=statefulset/local-balancer-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml + local new_result=/tmp/tmp.WHbJ0yO9r7/statefulset_local-balancer-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/local-balancer-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-27363", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KCjly5b3Fv ++ mktemp + local LAST_ERR=/tmp/tmp.SDlof9jgGC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/local-balancer-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KCjly5b3Fv + cat /tmp/tmp.SDlof9jgGC + rm /tmp/tmp.KCjly5b3Fv /tmp/tmp.SDlof9jgGC + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.WHbJ0yO9r7/statefulset_local-balancer-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.WHbJ0yO9r7/statefulset_local-balancer-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.WHbJ0yO9r7/statefulset_local-balancer-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_local-balancer-rs0.yml /tmp/tmp.WHbJ0yO9r7/statefulset_local-balancer-rs0.yml + log 'compare_kubectl: statefulset/local-balancer-rs0 OK' + set +o xtrace [2026-03-15T19:20:20+0000] compare_kubectl: statefulset/local-balancer-rs0 OK + compare_kubectl service/local-balancer-rs0-0 + local resource=service/local-balancer-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml + local new_result=/tmp/tmp.WHbJ0yO9r7/service_local-balancer-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/local-balancer-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-27363", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.hHk92maK8K ++ mktemp + local LAST_ERR=/tmp/tmp.HqE3EOvoBf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/local-balancer-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hHk92maK8K + cat /tmp/tmp.HqE3EOvoBf + rm /tmp/tmp.hHk92maK8K /tmp/tmp.HqE3EOvoBf + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.WHbJ0yO9r7/service_local-balancer-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.WHbJ0yO9r7/service_local-balancer-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.WHbJ0yO9r7/service_local-balancer-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_local-balancer-rs0-0.yml /tmp/tmp.WHbJ0yO9r7/service_local-balancer-rs0-0.yml + log 'compare_kubectl: service/local-balancer-rs0-0 OK' + set +o xtrace [2026-03-15T19:20:21+0000] compare_kubectl: service/local-balancer-rs0-0 OK ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vHFwHj4xBi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nRm5DiYfpY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vHFwHj4xBi +++ cat /tmp/tmp.nRm5DiYfpY +++ rm /tmp/tmp.vHFwHj4xBi /tmp/tmp.nRm5DiYfpY +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8pWsBXSmAo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZmWLqiRIkh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8pWsBXSmAo +++ cat /tmp/tmp.ZmWLqiRIkh +++ rm /tmp/tmp.8pWsBXSmAo /tmp/tmp.ZmWLqiRIkh +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ 
local LAST_OUT=/tmp/tmp.ck9CuzTnYp +++ mktemp ++ local LAST_ERR=/tmp/tmp.ynLI4rpnL5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ck9CuzTnYp ++ cat /tmp/tmp.ynLI4rpnL5 ++ rm /tmp/tmp.ck9CuzTnYp /tmp/tmp.ynLI4rpnL5 ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k8a1GwI4Yz +++ mktemp ++ local LAST_ERR=/tmp/tmp.Xn31fBu03s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k8a1GwI4Yz ++ cat /tmp/tmp.Xn31fBu03s ++ rm /tmp/tmp.k8a1GwI4Yz /tmp/tmp.Xn31fBu03s ++ return 0 ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ev3vsslEDe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.h395X797xi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Ev3vsslEDe +++ cat /tmp/tmp.h395X797xi +++ rm /tmp/tmp.Ev3vsslEDe /tmp/tmp.h395X797xi +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0VqE0TnZZg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5QUkRH8O3Z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.0VqE0TnZZg +++ cat /tmp/tmp.5QUkRH8O3Z +++ rm /tmp/tmp.0VqE0TnZZg /tmp/tmp.5QUkRH8O3Z +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ODzqfgtDDs +++ mktemp ++ local LAST_ERR=/tmp/tmp.tYBM0EL713 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ODzqfgtDDs ++ cat /tmp/tmp.tYBM0EL713 ++ rm /tmp/tmp.ODzqfgtDDs /tmp/tmp.tYBM0EL713 ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wYmwreDIuz +++ mktemp ++ local LAST_ERR=/tmp/tmp.UwQq87xpoB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
/tmp/tmp.wYmwreDIuz ++ cat /tmp/tmp.UwQq87xpoB ++ rm /tmp/tmp.wYmwreDIuz /tmp/tmp.UwQq87xpoB ++ return 0 ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Dc03q1sbzs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jBUIxhQwAn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Dc03q1sbzs +++ cat /tmp/tmp.jBUIxhQwAn +++ rm /tmp/tmp.Dc03q1sbzs /tmp/tmp.jBUIxhQwAn +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mqxjyakT2c ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2jhz2nM3iW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.mqxjyakT2c +++ cat /tmp/tmp.2jhz2nM3iW +++ rm /tmp/tmp.mqxjyakT2c /tmp/tmp.2jhz2nM3iW +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ sleep 1 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RofrGA1oAL +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZniB7qt22B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat 
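
[annotation] For LoadBalancer services the external address is not assigned instantly: the trace above shows local-balancer-rs0-2 being polled twelve times, one second apart, until .status.loadBalancer.ingress carries a hostname or ip. The loop it implies:

    # poll until the cloud provider populates the ingress entry
    until kubectl get "service/$service" \
              -o 'jsonpath={.status.loadBalancer.ingress[]}' \
          | grep -E -q 'hostname|ip'; do
        sleep 1
    done
    ip=$(kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
    hostname=$(kubectl get "service/$service" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}')
    echo "${ip:-$hostname}"   # GKE returns an IP; other clouds may return a hostname
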
/tmp/tmp.RofrGA1oAL ++ cat /tmp/tmp.ZniB7qt22B ++ rm /tmp/tmp.RofrGA1oAL /tmp/tmp.ZniB7qt22B ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pBKujbXXBh +++ mktemp ++ local LAST_ERR=/tmp/tmp.lSGNXDRXcg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pBKujbXXBh ++ cat /tmp/tmp.lSGNXDRXcg ++ rm /tmp/tmp.pBKujbXXBh /tmp/tmp.lSGNXDRXcg ++ return 0 + local URI=136.115.95.246,34.60.141.8,34.55.87.252 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@136.115.95.246,34.60.141.8,34.55.87.252 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@136.115.95.246,34.60.141.8,34.55.87.252 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@136.115.95.246,34.60.141.8,34.55.87.252 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zMuUL9Z9gp +++ mktemp ++ local LAST_ERR=/tmp/tmp.gwNEjTGFl5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zMuUL9Z9gp ++ cat /tmp/tmp.gwNEjTGFl5 ++ rm /tmp/tmp.zMuUL9Z9gp /tmp/tmp.gwNEjTGFl5 ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@136.115.95.246,34.60.141.8,34.55.87.252:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.OrC44BWecT ++ mktemp + local LAST_ERR=/tmp/tmp.2iu2MdHHKO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@136.115.95.246,34.60.141.8,34.55.87.252:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OrC44BWecT Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://136.115.95.246:27017,34.60.141.8:27017,34.55.87.252:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f1f20d99-d446-42ae-9e28-b14be8c4d8c3") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.2iu2MdHHKO + rm /tmp/tmp.OrC44BWecT /tmp/tmp.2iu2MdHHKO + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace 
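
[annotation] The three per-pod external addresses are then joined into one comma-separated seed list, so a single connection string reaches the whole replica set through the exposed services. With replicaSet=rs0 in the query string the shell treats the list as seeds and discovers the topology from them, as the connecting-to line below confirms:

    mongo "mongodb://myApp:myPass@136.115.95.246:27017,34.60.141.8:27017,34.55.87.252:27017/admin?ssl=false&replicaSet=rs0"
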
----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@136.115.95.246,34.60.141.8,34.55.87.252 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@136.115.95.246,34.60.141.8,34.55.87.252 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@136.115.95.246,34.60.141.8,34.55.87.252 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iwbokmOSPv +++ mktemp ++ local LAST_ERR=/tmp/tmp.L0uAdd8ZDU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iwbokmOSPv ++ cat /tmp/tmp.L0uAdd8ZDU ++ rm /tmp/tmp.iwbokmOSPv /tmp/tmp.L0uAdd8ZDU ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@136.115.95.246,34.60.141.8,34.55.87.252:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.mxHVb7Y9kc ++ mktemp + local LAST_ERR=/tmp/tmp.vgC9DvIH5c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@136.115.95.246,34.60.141.8,34.55.87.252:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mxHVb7Y9kc Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://136.115.95.246:27017,34.60.141.8:27017,34.55.87.252:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("e21de22d-e991-43f9-bc3f-46ded3d377bc") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.vgC9DvIH5c + rm /tmp/tmp.mxHVb7Y9kc /tmp/tmp.vgC9DvIH5c + return 0 + sleep 30 ++ get_service_ip local-balancer-rs0-0 ++ local service=local-balancer-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vtWIbf9R13 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3ErsiGAdEU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vtWIbf9R13 +++ cat /tmp/tmp.3ErsiGAdEU +++ rm /tmp/tmp.vtWIbf9R13 /tmp/tmp.3ErsiGAdEU +++ return 0 ++ '[' true '!=' true ']' ++ grep -q NotFound ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' +++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m93Brykiqr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qo3YHmNfAG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-0 -o 
'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.m93Brykiqr +++ cat /tmp/tmp.qo3YHmNfAG +++ rm /tmp/tmp.m93Brykiqr /tmp/tmp.qo3YHmNfAG +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VshU8Vdaaj +++ mktemp ++ local LAST_ERR=/tmp/tmp.mUMcQEtvMD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VshU8Vdaaj ++ cat /tmp/tmp.mUMcQEtvMD ++ rm /tmp/tmp.VshU8Vdaaj /tmp/tmp.mUMcQEtvMD ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FF8QCIzMV4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.iVrKve4DmB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-0 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FF8QCIzMV4 ++ cat /tmp/tmp.iVrKve4DmB ++ rm /tmp/tmp.FF8QCIzMV4 /tmp/tmp.iVrKve4DmB ++ return 0 + compare_mongo_cmd find myApp:myPass@136.115.95.246 '' :27017 + local command=find + local uri=myApp:myPass@136.115.95.246 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:22:20+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@136.115.95.246 mongodb :27017 '' + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local uri=myApp:myPass@136.115.95.246 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@136.115.95.246 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tLlptvfyq0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.01xPPNnKGT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tLlptvfyq0 ++ cat /tmp/tmp.01xPPNnKGT ++ rm /tmp/tmp.tLlptvfyq0 /tmp/tmp.01xPPNnKGT ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@136.115.95.246:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.zlunaXIQB1 ++ mktemp + local 
LAST_ERR=/tmp/tmp.LrpxwoHhZG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@136.115.95.246:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zlunaXIQB1 + cat /tmp/tmp.LrpxwoHhZG + rm /tmp/tmp.zlunaXIQB1 /tmp/tmp.LrpxwoHhZG + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find ++ get_service_ip local-balancer-rs0-1 ++ local service=local-balancer-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nodVEQTpfB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XjJAOFYitQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nodVEQTpfB +++ cat /tmp/tmp.XjJAOFYitQ +++ rm /tmp/tmp.nodVEQTpfB /tmp/tmp.XjJAOFYitQ +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6uNNYi9Vpd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZCn8alt42Z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6uNNYi9Vpd +++ cat /tmp/tmp.ZCn8alt42Z +++ rm /tmp/tmp.6uNNYi9Vpd /tmp/tmp.ZCn8alt42Z +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ud9PKbNUlP +++ mktemp ++ local LAST_ERR=/tmp/tmp.fJcStYDQXx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ud9PKbNUlP ++ cat /tmp/tmp.fJcStYDQXx ++ rm /tmp/tmp.ud9PKbNUlP /tmp/tmp.fJcStYDQXx ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mD72TGHJ24 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fB1jVN1vKS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-1 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mD72TGHJ24 ++ cat /tmp/tmp.fB1jVN1vKS ++ rm /tmp/tmp.mD72TGHJ24 /tmp/tmp.fB1jVN1vKS ++ return 0 + compare_mongo_cmd find myApp:myPass@34.60.141.8 '' :27017 + local command=find + local uri=myApp:myPass@34.60.141.8 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ 
-n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:22:29+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.60.141.8 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.60.141.8 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.60.141.8 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uo77ndXjOs +++ mktemp ++ local LAST_ERR=/tmp/tmp.ipjnrno2M0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uo77ndXjOs ++ cat /tmp/tmp.ipjnrno2M0 ++ rm /tmp/tmp.uo77ndXjOs /tmp/tmp.ipjnrno2M0 ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.60.141.8:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Y0RFIFpzFJ ++ mktemp + local LAST_ERR=/tmp/tmp.urB7vCFa3t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.60.141.8:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y0RFIFpzFJ + cat /tmp/tmp.urB7vCFa3t + rm /tmp/tmp.Y0RFIFpzFJ /tmp/tmp.urB7vCFa3t + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find ++ get_service_ip local-balancer-rs0-2 ++ local service=local-balancer-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.07SpqasD8y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wsiMTX6Ko5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/local-balancer -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.07SpqasD8y +++ cat /tmp/tmp.wsiMTX6Ko5 +++ rm /tmp/tmp.07SpqasD8y /tmp/tmp.wsiMTX6Ko5 +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qpL67M2ZVD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QArOGWaj5C +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.qpL67M2ZVD +++ cat /tmp/tmp.QArOGWaj5C +++ rm /tmp/tmp.qpL67M2ZVD 
/tmp/tmp.QArOGWaj5C +++ return 0 ++ service_type=LoadBalancer ++ '[' LoadBalancer = ClusterIP ']' ++ '[' LoadBalancer = NodePort ']' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[]}' ++ grep -E -q 'hostname|ip' ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LRXsR5NeKN +++ mktemp ++ local LAST_ERR=/tmp/tmp.vwXOnOw5CD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LRXsR5NeKN ++ cat /tmp/tmp.vwXOnOw5CD ++ rm /tmp/tmp.LRXsR5NeKN /tmp/tmp.vwXOnOw5CD ++ return 0 ++ kubectl_bin get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.diPtJE3M2j +++ mktemp ++ local LAST_ERR=/tmp/tmp.JSuqbjJbKe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/local-balancer-rs0-2 -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.diPtJE3M2j ++ cat /tmp/tmp.JSuqbjJbKe ++ rm /tmp/tmp.diPtJE3M2j /tmp/tmp.JSuqbjJbKe ++ return 0 + compare_mongo_cmd find myApp:myPass@34.55.87.252 '' :27017 + local command=find + local uri=myApp:myPass@34.55.87.252 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:22:36+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.55.87.252 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.55.87.252 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.55.87.252 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Gnq1hYFURg +++ mktemp ++ local LAST_ERR=/tmp/tmp.gj4x7QLgOY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Gnq1hYFURg ++ cat /tmp/tmp.gj4x7QLgOY ++ rm /tmp/tmp.Gnq1hYFURg /tmp/tmp.gj4x7QLgOY ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.55.87.252:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.RKBMuG07Ab ++ mktemp + local LAST_ERR=/tmp/tmp.hwaQF6mjLI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' 
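
[annotation] get_service_ip is the only part of this script that differs per expose type: it checks that .spec.replsets[].expose.enabled is true, reads .spec.type from the per-pod Service, then returns .spec.clusterIP for ClusterIP services or the load-balancer ingress address for LoadBalancer ones. A condensed sketch of the branching (reconstruction; the NodePort branch is exercised in the next step):

    get_service_ip() {
        local service=$1 type
        type=$(kubectl get "service/$service" -o 'jsonpath={.spec.type}')
        case "$type" in
            ClusterIP)
                kubectl get "service/$service" -o 'jsonpath={.spec.clusterIP}'
                ;;
            LoadBalancer)
                # wait for the cloud provider, as in the poll loop shown earlier
                until kubectl get "service/$service" \
                          -o 'jsonpath={.status.loadBalancer.ingress[]}' \
                      | grep -E -q 'hostname|ip'; do sleep 1; done
                kubectl get "service/$service" \
                    -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
                ;;
        esac
    }
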
| mongo mongodb://myApp:myPass@34.55.87.252:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RKBMuG07Ab + cat /tmp/tmp.hwaQF6mjLI + rm /tmp/tmp.RKBMuG07Ab /tmp/tmp.hwaQF6mjLI + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find + [[ local-balancer-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'delete PSMDB cluster local-balancer-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster local-balancer-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.DdQfqiv9H9 ++ mktemp + local LAST_ERR=/tmp/tmp.QqloNay150 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/local-balancer-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DdQfqiv9H9 perconaservermongodb.psmdb.percona.com "local-balancer" deleted from service-per-pod-27363 namespace + cat /tmp/tmp.QqloNay150 + rm /tmp/tmp.DdQfqiv9H9 /tmp/tmp.QqloNay150 + return 0 + desc 'check NodePort' + set +o xtrace ----------------------------------------------------------------------------------- check NodePort ----------------------------------------------------------------------------------- + check_cr_config node-port-rs0 + local cluster=node-port-rs0 + desc 'create PSMDB cluster node-port-rs0' + set +o xtrace ----------------------------------------------------------------------------------- create PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/node-port-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/node-port-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/node-port-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.WMl0nIUOJf + /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-27363/g ++ mktemp + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_ERR=/tmp/tmp.QcbT4RvomW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WMl0nIUOJf perconaservermongodb.psmdb.percona.com/node-port created + cat /tmp/tmp.QcbT4RvomW + rm /tmp/tmp.WMl0nIUOJf /tmp/tmp.QcbT4RvomW + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started 
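
[annotation] The final check_cr_config round switches the expose type to NodePort and repeats the same create/wait/compare/write/read script for node-port-rs0. The addressing differs: a NodePort service is reached through a node address plus the allocated port rather than a dedicated IP. One plausible way the lookup could be done (an assumption; this part of the log does not show the resolution itself):

    # NodePort addressing sketch (hypothetical): node address + allocated port
    node_port=$(kubectl get "service/$service" \
        -o 'jsonpath={.spec.ports[0].nodePort}')
    node_ip=$(kubectl get nodes \
        -o 'jsonpath={.items[0].status.addresses[?(@.type=="InternalIP")].address}')
    echo "$node_ip:$node_port"
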
----------------------------------------------------------------------------------- + wait_for_running node-port-rs0 3 false + local name=node-port-rs0 + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=node-port ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod node-port-rs0-0 + local pod=node-port-rs0-0 + set +o xtrace waiting for pod/node-port-rs0-0 to be ready.........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod node-port-rs0-1 + local pod=node-port-rs0-1 + set +o xtrace waiting for pod/node-port-rs0-1 to be ready......OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1dBjsq6JrG +++ mktemp ++ local LAST_ERR=/tmp/tmp.A4rnLcZGWb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1dBjsq6JrG ++ cat /tmp/tmp.A4rnLcZGWb ++ rm /tmp/tmp.1dBjsq6JrG /tmp/tmp.A4rnLcZGWb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod node-port-rs0-2 + local pod=node-port-rs0-2 + set +o xtrace waiting for pod/node-port-rs0-2 to be ready....OK ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K9jYGefkdv +++ mktemp ++ local LAST_ERR=/tmp/tmp.D5R1mspq0C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K9jYGefkdv ++ cat /tmp/tmp.D5R1mspq0C ++ rm /tmp/tmp.K9jYGefkdv /tmp/tmp.D5R1mspq0C ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wI707rz6UL +++ mktemp ++ local LAST_ERR=/tmp/tmp.3QQH5SAhBQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb node-port -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wI707rz6UL ++ cat /tmp/tmp.3QQH5SAhBQ ++ rm /tmp/tmp.wI707rz6UL /tmp/tmp.3QQH5SAhBQ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/node-port-rs0 + local resource=statefulset/node-port-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml + local new_result=/tmp/tmp.WHbJ0yO9r7/statefulset_node-port-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/node-port-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-27363", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.HAEujO5c1K ++ mktemp + local LAST_ERR=/tmp/tmp.hyeU2HXmKV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/node-port-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HAEujO5c1K + cat /tmp/tmp.hyeU2HXmKV + rm /tmp/tmp.HAEujO5c1K /tmp/tmp.hyeU2HXmKV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.WHbJ0yO9r7/statefulset_node-port-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.WHbJ0yO9r7/statefulset_node-port-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.WHbJ0yO9r7/statefulset_node-port-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/statefulset_node-port-rs0.yml /tmp/tmp.WHbJ0yO9r7/statefulset_node-port-rs0.yml + log 'compare_kubectl: statefulset/node-port-rs0 OK' + set +o xtrace [2026-03-15T19:24:10+0000] compare_kubectl: statefulset/node-port-rs0 OK + compare_kubectl service/node-port-rs0-0 + local resource=service/node-port-rs0-0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml + local new_result=/tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-27363", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.d1R2gRidvA ++ mktemp + local LAST_ERR=/tmp/tmp.lYiNS1khDm + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/node-port-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d1R2gRidvA + cat /tmp/tmp.lYiNS1khDm + rm /tmp/tmp.d1R2gRidvA /tmp/tmp.lYiNS1khDm + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + version_gt 1.22 ++ bc -l ++ echo '1.32 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0.yml /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + log 'compare_kubectl: service/node-port-rs0-0 OK' + set +o xtrace [2026-03-15T19:24:11+0000] compare_kubectl: service/node-port-rs0-0 OK ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zZAtAqrxRl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.E71KvYLIsO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zZAtAqrxRl +++ cat /tmp/tmp.E71KvYLIsO +++ rm /tmp/tmp.zZAtAqrxRl /tmp/tmp.E71KvYLIsO +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PoiozzgWeQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IOgTSfaKrY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.PoiozzgWeQ +++ cat /tmp/tmp.IOgTSfaKrY +++ rm /tmp/tmp.PoiozzgWeQ /tmp/tmp.IOgTSfaKrY +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TlQxignNqn +++ mktemp ++ local LAST_ERR=/tmp/tmp.2bEu4knMKn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-0 -o 
'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TlQxignNqn ++ cat /tmp/tmp.2bEu4knMKn ++ rm /tmp/tmp.TlQxignNqn /tmp/tmp.2bEu4knMKn ++ return 0 ++ return ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wd0Lo3xCSI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PbtsJWGtWs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wd0Lo3xCSI +++ cat /tmp/tmp.PbtsJWGtWs +++ rm /tmp/tmp.wd0Lo3xCSI /tmp/tmp.PbtsJWGtWs +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.l5nHHplQSn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VuYiSG05gO +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.l5nHHplQSn +++ cat /tmp/tmp.VuYiSG05gO +++ rm /tmp/tmp.l5nHHplQSn /tmp/tmp.VuYiSG05gO +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0kMrgNI7jk +++ mktemp ++ local LAST_ERR=/tmp/tmp.p2TXAzmhX9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0kMrgNI7jk ++ cat /tmp/tmp.p2TXAzmhX9 ++ rm /tmp/tmp.0kMrgNI7jk /tmp/tmp.p2TXAzmhX9 ++ return 0 ++ return ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jpLBbZazEn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DKtoA86dlV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jpLBbZazEn +++ cat /tmp/tmp.DKtoA86dlV +++ rm /tmp/tmp.jpLBbZazEn /tmp/tmp.DKtoA86dlV +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RfNIgZjKz8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IrGm4Ha9G4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RfNIgZjKz8 +++ cat /tmp/tmp.IrGm4Ha9G4 +++ rm /tmp/tmp.RfNIgZjKz8 /tmp/tmp.IrGm4Ha9G4 +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.0vtybM8rry +++ mktemp ++ local LAST_ERR=/tmp/tmp.6Br5TEKSRg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0vtybM8rry ++ cat /tmp/tmp.6Br5TEKSRg ++ rm /tmp/tmp.0vtybM8rry /tmp/tmp.6Br5TEKSRg ++ return 0 ++ return + local URI=34.118.239.233,34.118.236.129,34.118.232.13 + sleep 30 + desc 'create user myApp' + set +o xtrace ----------------------------------------------------------------------------------- create user myApp ----------------------------------------------------------------------------------- + run_mongo 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@34.118.239.233,34.118.236.129,34.118.232.13 mongodb :27017 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@34.118.239.233,34.118.236.129,34.118.232.13 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@34.118.239.233,34.118.236.129,34.118.232.13 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MANA5YuBwz +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zv15rL1hjA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MANA5YuBwz ++ cat /tmp/tmp.Zv15rL1hjA ++ rm /tmp/tmp.MANA5YuBwz /tmp/tmp.Zv15rL1hjA ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.239.233,34.118.236.129,34.118.232.13:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.YBdDpqPnP0 ++ mktemp + local LAST_ERR=/tmp/tmp.i3DjECPXEt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@34.118.239.233,34.118.236.129,34.118.232.13:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YBdDpqPnP0 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.239.233:27017,34.118.236.129:27017,34.118.232.13:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("f3a5cd37-3de5-45df-a030-3d7a0986303a") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.i3DjECPXEt + rm /tmp/tmp.YBdDpqPnP0 /tmp/tmp.i3DjECPXEt + return 0 + sleep 10 + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongo 'use myApp\n 
db.test.insert({ x: 100500 })' myApp:myPass@34.118.239.233,34.118.236.129,34.118.232.13 mongodb :27017 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@34.118.239.233,34.118.236.129,34.118.232.13 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.239.233,34.118.236.129,34.118.232.13 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OXvZrbTA6M +++ mktemp ++ local LAST_ERR=/tmp/tmp.qfubzSEYzt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OXvZrbTA6M ++ cat /tmp/tmp.qfubzSEYzt ++ rm /tmp/tmp.OXvZrbTA6M /tmp/tmp.qfubzSEYzt ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.239.233,34.118.236.129,34.118.232.13:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.9XSAOJOGz2 ++ mktemp + local LAST_ERR=/tmp/tmp.FuJShekKar + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@34.118.239.233,34.118.236.129,34.118.232.13:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9XSAOJOGz2 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://34.118.239.233:27017,34.118.236.129:27017,34.118.232.13:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("976193ba-a940-4dc7-a2fe-35a29dd84fe8") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.FuJShekKar + rm /tmp/tmp.9XSAOJOGz2 /tmp/tmp.FuJShekKar + return 0 + sleep 30 ++ get_service_ip node-port-rs0-0 ++ local service=node-port-rs0-0 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lyzc53OHHF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HPLuZ4vr8D +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lyzc53OHHF +++ cat /tmp/tmp.HPLuZ4vr8D +++ rm /tmp/tmp.lyzc53OHHF /tmp/tmp.HPLuZ4vr8D +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nJMrVKQOVx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CfEDqolLRf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nJMrVKQOVx +++ cat /tmp/tmp.CfEDqolLRf +++ rm /tmp/tmp.nJMrVKQOVx /tmp/tmp.CfEDqolLRf +++ return 0 ++ 
service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TUw2HgmRBQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.hvpZqq3J9C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-0 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TUw2HgmRBQ ++ cat /tmp/tmp.hvpZqq3J9C ++ rm /tmp/tmp.TUw2HgmRBQ /tmp/tmp.hvpZqq3J9C ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.239.233 '' :27017 + local command=find + local uri=myApp:myPass@34.118.239.233 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:25:42+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.239.233 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.239.233 + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.239.233 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZyjAY4RYUy +++ mktemp ++ local LAST_ERR=/tmp/tmp.1RacOfj6jI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZyjAY4RYUy ++ cat /tmp/tmp.1RacOfj6jI ++ rm /tmp/tmp.ZyjAY4RYUy /tmp/tmp.1RacOfj6jI ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.239.233:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BAHoCYQTr1 ++ mktemp + local LAST_ERR=/tmp/tmp.Mr77H3eOEU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.239.233:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BAHoCYQTr1 + cat /tmp/tmp.Mr77H3eOEU + rm /tmp/tmp.BAHoCYQTr1 /tmp/tmp.Mr77H3eOEU + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find ++ get_service_ip node-port-rs0-1 ++ local service=node-port-rs0-1 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KeCovbgeou ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QjvRfVTStS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) 
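Note: the kubectl_bin pattern that dominates this trace (mktemp a stdout and a stderr file, retry up to three times, replay the captured output, clean up, return the last exit status) can be reconstructed as a small wrapper. A minimal sketch inferred from the trace; the real helper in the e2e test library may differ, in particular in its retry delay and failure handling:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                      # up to three attempts
            set +e
            kubectl "$@" >"${LAST_OUT}" 2>"${LAST_ERR}"
            exit_status=$?
            set -e
            [ "${exit_status}" -eq 0 ] && break      # stop retrying on success
            sleep 4                                  # assumed delay (the trace sets timeout=4)
        done
        cat "${LAST_OUT}"                            # replay captured stdout for the caller
        cat "${LAST_ERR}" >&2                        # replay captured stderr
        rm -f "${LAST_OUT}" "${LAST_ERR}"
        return "${exit_status}"
    }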
+++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.KeCovbgeou +++ cat /tmp/tmp.QjvRfVTStS +++ rm /tmp/tmp.KeCovbgeou /tmp/tmp.QjvRfVTStS +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.20bOHtKf8J ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KkVT6sqT7r +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.20bOHtKf8J +++ cat /tmp/tmp.KkVT6sqT7r +++ rm /tmp/tmp.20bOHtKf8J /tmp/tmp.KkVT6sqT7r +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EA4aQiVAD5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XQ6vZVC5eL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-1 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EA4aQiVAD5 ++ cat /tmp/tmp.XQ6vZVC5eL ++ rm /tmp/tmp.EA4aQiVAD5 /tmp/tmp.XQ6vZVC5eL ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.236.129 '' :27017 + local command=find + local uri=myApp:myPass@34.118.236.129 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:25:49+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.236.129 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.236.129 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.236.129 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RkVFLuZXFf +++ mktemp ++ local LAST_ERR=/tmp/tmp.lZAJriJOv7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RkVFLuZXFf ++ cat /tmp/tmp.lZAJriJOv7 ++ rm /tmp/tmp.RkVFLuZXFf /tmp/tmp.lZAJriJOv7 ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.236.129:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.uDnF1XQyIr ++ mktemp + local LAST_ERR=/tmp/tmp.ro5yVmN0FJ + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.236.129:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uDnF1XQyIr + cat /tmp/tmp.ro5yVmN0FJ + rm /tmp/tmp.uDnF1XQyIr /tmp/tmp.ro5yVmN0FJ + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find ++ get_service_ip node-port-rs0-2 ++ local service=node-port-rs0-2 ++ local server_type=rs0 +++ kubectl_bin get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GSDUeik9IA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UoDkiSxmii +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get psmdb/node-port -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GSDUeik9IA +++ cat /tmp/tmp.UoDkiSxmii +++ rm /tmp/tmp.GSDUeik9IA /tmp/tmp.UoDkiSxmii +++ return 0 ++ '[' true '!=' true ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++ grep -q NotFound +++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NSjNtKk53Z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RIdY73azDz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.type}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NSjNtKk53Z +++ cat /tmp/tmp.RIdY73azDz +++ rm /tmp/tmp.NSjNtKk53Z /tmp/tmp.RIdY73azDz +++ return 0 ++ service_type=NodePort ++ '[' NodePort = ClusterIP ']' ++ '[' NodePort = NodePort ']' ++ kubectl_bin get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3le14S7KhW +++ mktemp ++ local LAST_ERR=/tmp/tmp.lq2DreaTk3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get service/node-port-rs0-2 -o 'jsonpath={.spec.clusterIP}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3le14S7KhW ++ cat /tmp/tmp.lq2DreaTk3 ++ rm /tmp/tmp.3le14S7KhW /tmp/tmp.lq2DreaTk3 ++ return 0 ++ return + compare_mongo_cmd find myApp:myPass@34.118.232.13 '' :27017 + local command=find + local uri=myApp:myPass@34.118.232.13 + local postfix= + local suffix=:27017 + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2026-03-15T19:25:55+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@34.118.232.13 mongodb :27017 '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@34.118.232.13 + local driver=mongodb + local suffix=:27017 + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@34.118.232.13 == *cfg* ]] + grep -E -v 'I NETWORK|W NETWORK|F NETWORK|"c":"NETWORK"|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CiYuhFEu4T +++ mktemp ++ local LAST_ERR=/tmp/tmp.dfoQ24NVfp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CiYuhFEu4T ++ cat /tmp/tmp.dfoQ24NVfp ++ rm /tmp/tmp.CiYuhFEu4T /tmp/tmp.dfoQ24NVfp ++ return 0 + local client_container=psmdb-client-bb8b97679-lkr2w + kubectl_bin exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.13:27017/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.OdUguK9RpR ++ mktemp + local LAST_ERR=/tmp/tmp.WvtJKsgg5W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-bb8b97679-lkr2w -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@34.118.232.13:27017/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OdUguK9RpR + cat /tmp/tmp.WvtJKsgg5W + rm /tmp/tmp.OdUguK9RpR /tmp/tmp.WvtJKsgg5W + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/find.json /tmp/tmp.WHbJ0yO9r7/find + [[ node-port-rs0 == \n\o\d\e\-\p\o\r\t\-\r\s\0 ]] + desc 'add service-per-pod label and annotation' + set +o xtrace ----------------------------------------------------------------------------------- add service-per-pod label and annotation ----------------------------------------------------------------------------------- ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MeZQl5fqXN +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZaCkCBi0P1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MeZQl5fqXN ++ cat /tmp/tmp.ZaCkCBi0P1 ++ rm /tmp/tmp.MeZQl5fqXN /tmp/tmp.ZaCkCBi0P1 ++ return 0 + old_node_port=31002 + kubectl_bin patch psmdb node-port --type=json --patch '[ { "op": "add", "path": "/spec/replsets/0/expose/annotations", "value": { "test": "service-per-pod", } }, { "op": "add", "path": "/spec/replsets/0/expose/labels", "value": { "test": "service-per-pod", } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.oOVJ7CrFTf ++ mktemp + local LAST_ERR=/tmp/tmp.VhHJ6vnbVs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb node-port --type=json --patch '[ { "op": "add", "path": "/spec/replsets/0/expose/annotations", "value": { "test": "service-per-pod", } }, { "op": "add", "path": "/spec/replsets/0/expose/labels", "value": { "test": "service-per-pod", } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oOVJ7CrFTf perconaservermongodb.psmdb.percona.com/node-port patched + cat /tmp/tmp.VhHJ6vnbVs + rm /tmp/tmp.oOVJ7CrFTf /tmp/tmp.VhHJ6vnbVs + return 0 + sleep 5 + desc 'check if service created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service created with expected config 
----------------------------------------------------------------------------------- + compare_kubectl service/node-port-rs0-0 -updated + local resource=service/node-port-rs0-0 + local postfix=-updated + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml + local new_result=/tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated-oc.yml ']' + kubectl_bin get -o yaml service/node-port-rs0-0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("service-per-pod-27363", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ynz0b8ecV3 ++ mktemp + local LAST_ERR=/tmp/tmp.BUOzTtl8Iw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/node-port-rs0-0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ynz0b8ecV3 + cat /tmp/tmp.BUOzTtl8Iw + rm /tmp/tmp.ynz0b8ecV3 /tmp/tmp.BUOzTtl8Iw + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/compare/service_node-port-rs0-0-updated.yml /tmp/tmp.WHbJ0yO9r7/service_node-port-rs0-0.yml + log 'compare_kubectl: service/node-port-rs0-0 OK' + set +o xtrace [2026-03-15T19:26:06+0000] compare_kubectl: service/node-port-rs0-0 OK ++ kubectl_bin get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7ROhiuXrDf +++ mktemp ++ local LAST_ERR=/tmp/tmp.85Q6lgMXB3 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get svc node-port-rs0-0 -o 'jsonpath={.spec.ports[0].nodePort}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7ROhiuXrDf ++ cat /tmp/tmp.85Q6lgMXB3 ++ rm /tmp/tmp.7ROhiuXrDf /tmp/tmp.85Q6lgMXB3 ++ return 0 + current_node_port=31002 + [[ 31002 != \3\1\0\0\2 ]] + desc 'delete PSMDB cluster node-port-rs0' + set +o xtrace ----------------------------------------------------------------------------------- delete PSMDB cluster node-port-rs0 ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/node-port-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.rnTG3DsXuB ++ mktemp + local LAST_ERR=/tmp/tmp.HioBcgsmDz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/node-port-rs0.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rnTG3DsXuB perconaservermongodb.psmdb.percona.com "node-port" deleted from service-per-pod-27363 namespace + cat /tmp/tmp.HioBcgsmDz + rm /tmp/tmp.rnTG3DsXuB /tmp/tmp.HioBcgsmDz + return 0 + desc 'check Mongos in sharded cluster' + set +o xtrace ----------------------------------------------------------------------------------- check Mongos in sharded cluster ----------------------------------------------------------------------------------- + local cluster=some-name + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/sharded.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/sharded.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/e2e-tests/service-per-pod/conf/sharded.yml + yq eval '(.spec | select(.image == null)).image = 
"docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' ++ mktemp + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2285-ace12b60"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.z0DI2bhyiL + yq eval '.spec.upgradeOptions.apply="Never"' + /usr/sbin/sed -e s/NAME_SPACE/service-per-pod-27363/g ++ mktemp + local LAST_ERR=/tmp/tmp.z7v7uoy02G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z0DI2bhyiL perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.z7v7uoy02G + rm /tmp/tmp.z0DI2bhyiL /tmp/tmp.z7v7uoy02G + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready...............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GcW49KGmWi +++ mktemp ++ local LAST_ERR=/tmp/tmp.3MK0e4Xq5X ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GcW49KGmWi ++ cat /tmp/tmp.3MK0e4Xq5X ++ rm /tmp/tmp.GcW49KGmWi /tmp/tmp.3MK0e4Xq5X ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.............OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6OgszjUZTb +++ mktemp ++ local LAST_ERR=/tmp/tmp.rTwLNtGhu6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6OgszjUZTb ++ cat /tmp/tmp.rTwLNtGhu6 ++ rm /tmp/tmp.6OgszjUZTb /tmp/tmp.rTwLNtGhu6 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QHbdsW5br3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uvvGW4ZfqH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QHbdsW5br3 ++ cat /tmp/tmp.uvvGW4ZfqH ++ rm /tmp/tmp.QHbdsW5br3 /tmp/tmp.uvvGW4ZfqH ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.............................. 
+ wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2m64gLF7WH +++ mktemp ++ local LAST_ERR=/tmp/tmp.siUPxgFtoV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2m64gLF7WH ++ cat /tmp/tmp.siUPxgFtoV ++ rm /tmp/tmp.2m64gLF7WH /tmp/tmp.siUPxgFtoV ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y41I3UONL2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UZlNc6mbJV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y41I3UONL2 ++ cat /tmp/tmp.UZlNc6mbJV ++ rm /tmp/tmp.Y41I3UONL2 /tmp/tmp.UZlNc6mbJV ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vn4YoL3TPq +++ mktemp ++ local LAST_ERR=/tmp/tmp.7Jz2fGP2Sa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vn4YoL3TPq ++ cat /tmp/tmp.7Jz2fGP2Sa ++ rm /tmp/tmp.vn4YoL3TPq /tmp/tmp.7Jz2fGP2Sa ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XXtl7CPEji +++ mktemp ++ local LAST_ERR=/tmp/tmp.I4wg79VnKO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a 
-n 1 ']' ++ break ++ cat /tmp/tmp.XXtl7CPEji ++ cat /tmp/tmp.I4wg79VnKO ++ rm /tmp/tmp.XXtl7CPEji /tmp/tmp.I4wg79VnKO ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fmCKtE2w7V +++ mktemp ++ local LAST_ERR=/tmp/tmp.OOC3FEj7pj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fmCKtE2w7V ++ cat /tmp/tmp.OOC3FEj7pj ++ rm /tmp/tmp.fmCKtE2w7V /tmp/tmp.OOC3FEj7pj ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UuF2mqxt3i +++ mktemp ++ local LAST_ERR=/tmp/tmp.qWHzl9Ym4z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UuF2mqxt3i ++ cat /tmp/tmp.qWHzl9Ym4z ++ rm /tmp/tmp.UuF2mqxt3i /tmp/tmp.qWHzl9Ym4z ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + desc 'enabling servicePerPod for mongos' + set +o xtrace ----------------------------------------------------------------------------------- enabling servicePerPod for mongos ----------------------------------------------------------------------------------- + kubectl patch psmdb some-name --type=merge -p '{"spec":{"sharding":{"mongos":{"expose":{"servicePerPod":true}}}}}' perconaservermongodb.psmdb.percona.com/some-name patched + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3NniO3dMR6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QJxGfmJxHz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3NniO3dMR6 ++ cat /tmp/tmp.QJxGfmJxHz ++ rm /tmp/tmp.3NniO3dMR6 /tmp/tmp.QJxGfmJxHz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o9J7HKdRYe +++ mktemp ++ local LAST_ERR=/tmp/tmp.DqEQJHCU4j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 
2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o9J7HKdRYe ++ cat /tmp/tmp.DqEQJHCU4j ++ rm /tmp/tmp.o9J7HKdRYe /tmp/tmp.DqEQJHCU4j ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6GMzCasqey +++ mktemp ++ local LAST_ERR=/tmp/tmp.luIoykrF22 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6GMzCasqey ++ cat /tmp/tmp.luIoykrF22 ++ rm /tmp/tmp.6GMzCasqey /tmp/tmp.luIoykrF22 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + check_service present some-name-mongos-0 + state=present + svc_name=some-name-mongos-0 + '[' present = present ']' + echo -n 'check that some-name-mongos-0 was created' check that some-name-mongos-0 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-0 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service present some-name-mongos-1 + state=present + svc_name=some-name-mongos-1 + '[' present = present ']' + echo -n 'check that some-name-mongos-1 was created' check that some-name-mongos-1 was created+ local timeout=0 + grep -vq NotFound + kubectl_bin get service/some-name-mongos-1 -o 'jsonpath={.spec.type}' + echo .OK .OK + check_service present some-name-mongos-2 + state=present + svc_name=some-name-mongos-2 + '[' present = present ']' + echo -n 'check that some-name-mongos-2 was created' check that some-name-mongos-2 was created+ local timeout=0 + kubectl_bin get service/some-name-mongos-2 -o 'jsonpath={.spec.type}' + grep -vq NotFound + echo .OK .OK + check_service removed some-name-mongos + state=removed + svc_name=some-name-mongos + '[' removed = present ']' + '[' removed = removed ']' + echo -n 'check that some-name-mongos was removed' check that some-name-mongos was removed++ kubectl_bin get service/some-name-mongos -o 'jsonpath={.spec.type}' ++ grep NotFound + [[ -z Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found Error from server (NotFound): services "some-name-mongos" not found ]] + echo .OK .OK + destroy service-per-pod-27363 + local namespace=service-per-pod-27363 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.4KoModAgFl +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fc362ed7J9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get 
+ destroy service-per-pod-27363
+ local namespace=service-per-pod-27363
+ local ignore_logs=true
+ [[ 0 == 1 ]]
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_backups
+ desc 'Delete psmdb-backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
Delete psmdb-backup
-----------------------------------------------------------------------------------
++ kubectl_bin get psmdb-backup --no-headers
++ wc -l
+++ mktemp
++ local LAST_OUT=/tmp/tmp.4KoModAgFl
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Fc362ed7J9
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb-backup --no-headers
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.4KoModAgFl
++ cat /tmp/tmp.Fc362ed7J9
No resources found in service-per-pod-27363 namespace.
++ rm /tmp/tmp.4KoModAgFl /tmp/tmp.Fc362ed7J9
++ return 0
+ '[' 0 '!=' 0 ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.8Pu2tsiYje
++ mktemp
+ local LAST_ERR=/tmp/tmp.uXvANPd2gS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8Pu2tsiYje
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.uXvANPd2gS
+ rm /tmp/tmp.8Pu2tsiYje /tmp/tmp.uXvANPd2gS
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.uAV0Ycwnpq
++ mktemp
+ local LAST_ERR=/tmp/tmp.jKiQ0VJF04
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.uAV0Ycwnpq
+ cat /tmp/tmp.jKiQ0VJF04
+ rm /tmp/tmp.uAV0Ycwnpq /tmp/tmp.jKiQ0VJF04
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
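Note: the delete_crd pattern above is worth calling out. For each CRD named in
deploy/crd.yaml it strips finalizers from any leftover custom resources (so
CRD deletion is not blocked) and then waits for the CRD itself to disappear.
Two observable quirks: the "stray \ before -" warnings come from over-escaping
the '---' document separator in the grep pattern (grep -v -- '---' avoids
them), and because xargs runs its command even on empty input, a failed
listing produces the bogus "kubectl patch ... -n sh" call seen above ($0
defaults to "sh" when sh -c receives no arguments; GNU xargs -r would skip
the run entirely). A minimal sketch of the same cleanup with those two issues
addressed, as an assumed reconstruction rather than the suite's verbatim
source:

# For every CRD in the (multi-document) crd.yaml, clear finalizers on any
# remaining instances, then wait until the CRD itself is gone.
for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
    kubectl get "$crd_name" --all-namespaces -o wide 2>/dev/null \
        | grep -v NAMESPACE \
        | while read -r ns name _; do
              # An empty finalizers list lets the API server finish deletion.
              kubectl patch "$crd_name" -n "$ns" "$name" --type=merge \
                  -p '{"metadata":{"finalizers":[]}}' || :
          done
    kubectl wait --for=delete crd "$crd_name" || :
done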
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.e2TCy9dhBA
++ mktemp
+ local LAST_ERR=/tmp/tmp.S3MuuBPZGy
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.e2TCy9dhBA
+ cat /tmp/tmp.S3MuuBPZGy
+ rm /tmp/tmp.e2TCy9dhBA /tmp/tmp.S3MuuBPZGy
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.Lb93l2vArb
++ mktemp
+ local LAST_ERR=/tmp/tmp.ZwGOkAi622
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Lb93l2vArb
+ cat /tmp/tmp.ZwGOkAi622
+ rm /tmp/tmp.Lb93l2vArb /tmp/tmp.ZwGOkAi622
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.4AXula9rIC
++ mktemp
+ local LAST_ERR=/tmp/tmp.of0Mo4lbuu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2285/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.4AXula9rIC
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.of0Mo4lbuu
+ rm /tmp/tmp.4AXula9rIC /tmp/tmp.of0Mo4lbuu
+ return 0
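Note: every kubectl call in this log goes through a kubectl_bin wrapper whose
shape is fully visible in the trace: capture stdout/stderr to mktemp files,
retry up to three times with increasing sleeps, print the captured output, and
return the last exit status. A simplified sketch of that wrapper, reconstructed
from the -x output (the real helper also prints the temp files after each
failed attempt; the variable names follow the trace):

# kubectl_bin ARGS... -- kubectl with output capture and retry, as implied above.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ $exit_status -eq 0 ]; then
            break
        fi
        sleep $((timeout * i))   # produces the sleep 0 / sleep 4 / sleep 8 pattern seen below
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}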
"cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.f01LTsnwrg Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from 
+ sleep 0
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.jrIgMHMBPy
namespace "cert-manager" deleted
+ cat /tmp/tmp.f01LTsnwrg
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
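Note: these retries can never succeed -- the first pass already deleted
everything, so the second and third passes fail with NotFound and the wrapper
finally returns 1, which the caller discards. A sketch of a teardown that is
idempotent on its own (assuming only standard kubectl flags, no wrapper):

# --ignore-not-found turns re-deletion of already-removed objects into a
# no-op, so a blanket retry loop is unnecessary for cleanup.
kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml \
    --ignore-not-found --wait=false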
"cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.jrIgMHMBPy + cat /tmp/tmp.f01LTsnwrg Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.jrIgMHMBPy + cat /tmp/tmp.f01LTsnwrg Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error 
from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.jrIgMHMBPy /tmp/tmp.f01LTsnwrg + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace service-per-pod-27363 + rm -rf /tmp/tmp.WHbJ0yO9r7 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.YlAH4kA0ch + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.hcuuU0lgya ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.0bWZsJiEuV + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.Q2RbFwSq70 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace service-per-pod-27363
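Note: the run ends with the test and operator namespaces being force-deleted
concurrently, which is why two kubectl_bin traces interleave above. A sketch
of that final teardown step (an assumed reconstruction; the namespace names
are the ones from this run):

# Force-delete both namespaces in parallel; --grace-period=0 --force skips
# graceful pod termination, which is acceptable for throwaway CI namespaces.
kubectl delete --grace-period=0 --force=true namespace service-per-pod-27363 &
kubectl delete --grace-period=0 --force=true namespace psmdb-operator &
wait   # block until both background deletions have finished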