++ echo 'Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/logs/expose-sharded.log' Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/logs/expose-sharded.log ++ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP_AZURE= ++ oc get projects ++ kubectl get nodes ++ grep '^minikube' +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' grep: warning: stray \ before - Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep gke Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 ++ '[' v1.31.14-gke.1081000 ']' ++ GKE=1 +++ kubectl version -o json +++ /usr/sbin/sed -r 's/[^0-9.]+//g' +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 ++ KUBE_VERSION=1.31 + main + create_infra expose-sharded-9120 + local ns=expose-sharded-9120 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.Qink7EtaQm ++ mktemp + local LAST_ERR=/tmp/tmp.U2teOBLkNC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qink7EtaQm customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.U2teOBLkNC + rm /tmp/tmp.Qink7EtaQm /tmp/tmp.U2teOBLkNC + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.1qoyGGz6Lv ++ mktemp + local LAST_ERR=/tmp/tmp.T4a2w2DyhI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 
0 -a -n 1 ']' + break + cat /tmp/tmp.1qoyGGz6Lv + cat /tmp/tmp.T4a2w2DyhI + rm /tmp/tmp.1qoyGGz6Lv /tmp/tmp.T4a2w2DyhI + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.dTPgc0CXYZ ++ mktemp + local LAST_ERR=/tmp/tmp.mN5vE4Gpm1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dTPgc0CXYZ + cat /tmp/tmp.mN5vE4Gpm1 + rm /tmp/tmp.dTPgc0CXYZ /tmp/tmp.mN5vE4Gpm1 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.b5Bo4TElkf ++ mktemp + local LAST_ERR=/tmp/tmp.zXqxa5nwjr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b5Bo4TElkf + cat /tmp/tmp.zXqxa5nwjr + rm /tmp/tmp.b5Bo4TElkf /tmp/tmp.zXqxa5nwjr + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.j4jlApMkcg ++ mktemp + local LAST_ERR=/tmp/tmp.RGUyY1BbMK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.j4jlApMkcg clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.RGUyY1BbMK + rm /tmp/tmp.j4jlApMkcg /tmp/tmp.RGUyY1BbMK + return 0 + check_crd_for_deletion PR-2158-c70b6c4a + local git_tag=PR-2158-c70b6c4a ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2158-c70b6c4a/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s 
https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xoO6pytAMO +++ mktemp ++ local LAST_ERR=/tmp/tmp.AZjJVmeye0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xoO6pytAMO ++ cat /tmp/tmp.AZjJVmeye0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xoO6pytAMO ++ cat /tmp/tmp.AZjJVmeye0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xoO6pytAMO ++ cat /tmp/tmp.AZjJVmeye0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.xoO6pytAMO ++ cat /tmp/tmp.AZjJVmeye0 Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.xoO6pytAMO /tmp/tmp.AZjJVmeye0 ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces 
----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' ++ mktemp + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.yGg4G54zTa ++ mktemp egrep: warning: egrep is obsolescent; using grep -E + local LAST_ERR=/tmp/tmp.K20JO2PkcI + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.ZbrlYZslCK ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.ZGaOSUsCRz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yGg4G54zTa + cat /tmp/tmp.K20JO2PkcI + rm /tmp/tmp.yGg4G54zTa /tmp/tmp.K20JO2PkcI + return 0 namespace "expose-sharded-31057" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ZbrlYZslCK namespace "psmdb-operator" deleted + cat /tmp/tmp.ZGaOSUsCRz + rm /tmp/tmp.ZbrlYZslCK /tmp/tmp.ZGaOSUsCRz + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.aGGxS2NTiQ ++ mktemp + local LAST_ERR=/tmp/tmp.vepdGgyNzP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aGGxS2NTiQ + cat /tmp/tmp.vepdGgyNzP + rm /tmp/tmp.aGGxS2NTiQ /tmp/tmp.vepdGgyNzP + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Fg72rnIE9B ++ mktemp + local LAST_ERR=/tmp/tmp.YJlp2ldEXO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fg72rnIE9B namespace/psmdb-operator created + cat /tmp/tmp.YJlp2ldEXO + rm /tmp/tmp.Fg72rnIE9B /tmp/tmp.YJlp2ldEXO + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.2Esn6SbpXs +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ixs7WCvO04 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2Esn6SbpXs ++ cat /tmp/tmp.Ixs7WCvO04 ++ rm /tmp/tmp.2Esn6SbpXs /tmp/tmp.Ixs7WCvO04 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2158-c70b6c4a-2-cluster6 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WLCdExLtmc ++ mktemp + local LAST_ERR=/tmp/tmp.KX3jtux3G2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-psmdb-2158-c70b6c4a-2-cluster6 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WLCdExLtmc Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2158-c70b6c4a-2-cluster6" modified. + cat /tmp/tmp.KX3jtux3G2 + rm /tmp/tmp.WLCdExLtmc /tmp/tmp.KX3jtux3G2 + return 0 + deploy_operator + desc 'start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2158-c70b6c4a' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: perconalab/percona-server-mongodb-operator:PR-2158-c70b6c4a ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.aMKhdAswAJ ++ mktemp + local LAST_ERR=/tmp/tmp.m4HZpizN3A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aMKhdAswAJ customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.m4HZpizN3A + rm /tmp/tmp.aMKhdAswAJ /tmp/tmp.m4HZpizN3A + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OF99W67U9f ++ mktemp + local LAST_ERR=/tmp/tmp.KytGK07K4A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OF99W67U9f clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.KytGK07K4A + rm /tmp/tmp.OF99W67U9f /tmp/tmp.KytGK07K4A + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-2158-c70b6c4a") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.BP6sDDWLe3 ++ mktemp + local LAST_ERR=/tmp/tmp.LD4C1KkWV9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BP6sDDWLe3 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.LD4C1KkWV9 + rm /tmp/tmp.BP6sDDWLe3 /tmp/tmp.LD4C1KkWV9 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.a0xAXYEtBY +++ mktemp ++ local LAST_ERR=/tmp/tmp.lVL6aqAHuw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.a0xAXYEtBY ++ cat /tmp/tmp.lVL6aqAHuw ++ rm /tmp/tmp.a0xAXYEtBY /tmp/tmp.lVL6aqAHuw ++ return 0 + wait_operator_pod percona-server-mongodb-operator-6999c84b9c-pn67d + local pod=percona-server-mongodb-operator-6999c84b9c-pn67d + set +o xtrace waiting for pod/percona-server-mongodb-operator-6999c84b9c-pn67d to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.PnfO4QfBHC +++ mktemp ++ local LAST_ERR=/tmp/tmp.TSFrTRzpwa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.PnfO4QfBHC ++ cat /tmp/tmp.TSFrTRzpwa ++ rm /tmp/tmp.PnfO4QfBHC /tmp/tmp.TSFrTRzpwa ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-6999c84b9c-pn67d ++ mktemp + local LAST_OUT=/tmp/tmp.PBAM9UbvEI ++ mktemp + local LAST_ERR=/tmp/tmp.5ZqSme4C2b + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-6999c84b9c-pn67d + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PBAM9UbvEI + cat /tmp/tmp.5ZqSme4C2b + rm /tmp/tmp.PBAM9UbvEI /tmp/tmp.5ZqSme4C2b + return 0 2025-12-23T13:48:22.004Z INFO setup Manager starting up {"gitCommit": "c70b6c4a288beb141674b1787dd0e83d6ad0098a", "gitBranch": "PR-2158-c70b6c4a", "buildTime": "", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} + create_namespace expose-sharded-9120 + local namespace=expose-sharded-9120 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces expose-sharded-9120' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces expose-sharded-9120 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace expose-sharded-9120 --ignore-not-found + awk '{print$1}' ++ mktemp + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.bv9ed6z9Fk egrep: warning: egrep is obsolescent; using grep -E ++ mktemp + local LAST_OUT=/tmp/tmp.o2QPA7Fzzr ++ mktemp + local LAST_ERR=/tmp/tmp.buaPwNluJV + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.SQoG1HmvmM + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace expose-sharded-9120 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bv9ed6z9Fk + cat /tmp/tmp.SQoG1HmvmM + rm /tmp/tmp.bv9ed6z9Fk /tmp/tmp.SQoG1HmvmM + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.o2QPA7Fzzr + cat /tmp/tmp.buaPwNluJV + rm /tmp/tmp.o2QPA7Fzzr /tmp/tmp.buaPwNluJV + return 0 + kubectl_bin wait --for=delete namespace expose-sharded-9120 ++ mktemp + local LAST_OUT=/tmp/tmp.SK5yl06tM1 ++ mktemp + local LAST_ERR=/tmp/tmp.BZHe3CnPAI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace expose-sharded-9120 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SK5yl06tM1 + cat /tmp/tmp.BZHe3CnPAI + rm /tmp/tmp.SK5yl06tM1 /tmp/tmp.BZHe3CnPAI + return 0 + desc 'create namespace expose-sharded-9120' + set +o xtrace ----------------------------------------------------------------------------------- create namespace expose-sharded-9120 ----------------------------------------------------------------------------------- + kubectl_bin create namespace 
expose-sharded-9120 ++ mktemp + local LAST_OUT=/tmp/tmp.TulKJh8Khc ++ mktemp + local LAST_ERR=/tmp/tmp.R5roB3Z3jl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace expose-sharded-9120 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TulKJh8Khc namespace/expose-sharded-9120 created + cat /tmp/tmp.R5roB3Z3jl + rm /tmp/tmp.TulKJh8Khc /tmp/tmp.R5roB3Z3jl + return 0 + set_kube_ctx expose-sharded-9120 + local namespace=expose-sharded-9120 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.NlMec8NAHY +++ mktemp ++ local LAST_ERR=/tmp/tmp.AS7gNgoiUc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NlMec8NAHY ++ cat /tmp/tmp.AS7gNgoiUc ++ rm /tmp/tmp.NlMec8NAHY /tmp/tmp.AS7gNgoiUc ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2158-c70b6c4a-2-cluster6 --namespace=expose-sharded-9120 ++ mktemp + local LAST_OUT=/tmp/tmp.XmhvwPANk4 ++ mktemp + local LAST_ERR=/tmp/tmp.X0H9TotUlX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2158-c70b6c4a-2-cluster6 --namespace=expose-sharded-9120 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XmhvwPANk4 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2158-c70b6c4a-2-cluster6" modified. + cat /tmp/tmp.X0H9TotUlX + rm /tmp/tmp.XmhvwPANk4 /tmp/tmp.X0H9TotUlX + return 0 + desc 'create first PSMDB cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster ----------------------------------------------------------------------------------- + cluster=some-name + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.YATLvNZcvB ++ mktemp + local LAST_ERR=/tmp/tmp.9S9ny1BuTz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YATLvNZcvB secret/some-users created deployment.apps/psmdb-client created + cat /tmp/tmp.9S9ny1BuTz + rm /tmp/tmp.YATLvNZcvB /tmp/tmp.9S9ny1BuTz + return 0 + apply_s3_storage_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.3gbAPardZr ++ mktemp + local LAST_ERR=/tmp/tmp.GpU3zzqq4n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/cloud-secret.yml 
+ exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3gbAPardZr secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created secret/gcp-cs-sa-key-secret created + cat /tmp/tmp.GpU3zzqq4n + rm /tmp/tmp.3gbAPardZr /tmp/tmp.GpU3zzqq4n + return 0 + version_gt 1.19 ++ echo '1.31 >= 1.19' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' 0 -ne 1 ']' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/container-rc.yaml + /usr/sbin/sed s/docker/runc/g + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PZROJIPx5O ++ mktemp + local LAST_ERR=/tmp/tmp.9zEBeGQRGq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PZROJIPx5O runtimeclass.node.k8s.io/container-rc unchanged + cat /tmp/tmp.9zEBeGQRGq + rm /tmp/tmp.PZROJIPx5O /tmp/tmp.9zEBeGQRGq + return 0 + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/conf/some-name-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/conf/some-name-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/conf/some-name-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod8.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-2158-c70b6c4a"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' + /usr/sbin/sed -e s/NAME_SPACE/expose-sharded-9120/g + local LAST_OUT=/tmp/tmp.8orss4kBtE ++ mktemp + local LAST_ERR=/tmp/tmp.uONdK6JvBf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8orss4kBtE perconaservermongodb.psmdb.percona.com/some-name created + cat /tmp/tmp.uONdK6JvBf + rm /tmp/tmp.8orss4kBtE /tmp/tmp.uONdK6JvBf + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.......OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.....OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LZoN2r8Rs4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.QtJ4K2lLzS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ 
exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LZoN2r8Rs4 ++ cat /tmp/tmp.QtJ4K2lLzS ++ rm /tmp/tmp.LZoN2r8Rs4 /tmp/tmp.QtJ4K2lLzS ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.....OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.elXVvrSU04 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yoxtvBPePQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.elXVvrSU04 ++ cat /tmp/tmp.yoxtvBPePQ ++ rm /tmp/tmp.elXVvrSU04 /tmp/tmp.yoxtvBPePQ ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sK4Rk1s9oz +++ mktemp ++ local LAST_ERR=/tmp/tmp.KhDZjTAPCJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sK4Rk1s9oz ++ cat /tmp/tmp.KhDZjTAPCJ ++ rm /tmp/tmp.sK4Rk1s9oz /tmp/tmp.KhDZjTAPCJ ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................... + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nphNElYzIU +++ mktemp ++ local LAST_ERR=/tmp/tmp.577z7ZQapu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nphNElYzIU ++ cat /tmp/tmp.577z7ZQapu ++ rm /tmp/tmp.nphNElYzIU /tmp/tmp.577z7ZQapu ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1nDHWJlI8q +++ mktemp ++ local LAST_ERR=/tmp/tmp.xaKbMTqxDE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1nDHWJlI8q ++ cat /tmp/tmp.xaKbMTqxDE ++ rm /tmp/tmp.1nDHWJlI8q /tmp/tmp.xaKbMTqxDE ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 
'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m92uqbfW0T +++ mktemp ++ local LAST_ERR=/tmp/tmp.jTkYgWRr1V ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m92uqbfW0T ++ cat /tmp/tmp.jTkYgWRr1V ++ rm /tmp/tmp.m92uqbfW0T /tmp/tmp.jTkYgWRr1V ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IuIBy92QZ1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.PN8XbToBdo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IuIBy92QZ1 ++ cat /tmp/tmp.PN8XbToBdo ++ rm /tmp/tmp.IuIBy92QZ1 /tmp/tmp.PN8XbToBdo ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ptwUCLXtGW +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zpcuqxbyqx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ptwUCLXtGW ++ cat /tmp/tmp.Zpcuqxbyqx ++ rm /tmp/tmp.ptwUCLXtGW /tmp/tmp.Zpcuqxbyqx ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gvFmF0hmxJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.y07rVR8dIB ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gvFmF0hmxJ ++ cat /tmp/tmp.y07rVR8dIB ++ rm /tmp/tmp.gvFmF0hmxJ /tmp/tmp.y07rVR8dIB ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oCB8k4Ufu6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wrF5h9oL2Y ++ local exit_status=0 ++ local timeout=4 +++ seq 
0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oCB8k4Ufu6 ++ cat /tmp/tmp.wrF5h9oL2Y ++ rm /tmp/tmp.oCB8k4Ufu6 /tmp/tmp.wrF5h9oL2Y ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + desc 'check if service and statefulset created with expected config' + set +o xtrace ----------------------------------------------------------------------------------- check if service and statefulset created with expected config ----------------------------------------------------------------------------------- + compare_kubectl statefulset/some-name-rs0 + local resource=statefulset/some-name-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml + local new_result=/tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.HUIsKDupKU ++ mktemp + local LAST_ERR=/tmp/tmp.DVj4YwXCg1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HUIsKDupKU + cat /tmp/tmp.DVj4YwXCg1 + rm /tmp/tmp.HUIsKDupKU /tmp/tmp.DVj4YwXCg1 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-23T13:52:21+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.4jR1f3xgm5 ++ mktemp + local LAST_ERR=/tmp/tmp.NKkanRjPKX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4jR1f3xgm5 + cat /tmp/tmp.NKkanRjPKX + rm /tmp/tmp.4jR1f3xgm5 /tmp/tmp.NKkanRjPKX + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-23T13:52:22+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Un3pq7Tksg ++ mktemp + local LAST_ERR=/tmp/tmp.hsQ1nsZfUi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Un3pq7Tksg + cat /tmp/tmp.hsQ1nsZfUi + rm /tmp/tmp.Un3pq7Tksg /tmp/tmp.hsQ1nsZfUi + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-12-23T13:52:23+0000] compare_kubectl: statefulset/some-name-mongos OK + desc 'disabling sharding' + set +o xtrace ----------------------------------------------------------------------------------- disabling sharding ----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.uzqjpgvqp3 ++ mktemp + local LAST_ERR=/tmp/tmp.TWJEiWzEuz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uzqjpgvqp3 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.TWJEiWzEuz + rm /tmp/tmp.uzqjpgvqp3 /tmp/tmp.TWJEiWzEuz + return 0 + sleep 10 + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.NEsPKGJ8ba +++ mktemp ++ local LAST_ERR=/tmp/tmp.z5qMotTY5T ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NEsPKGJ8ba ++ cat /tmp/tmp.z5qMotTY5T ++ rm /tmp/tmp.NEsPKGJ8ba /tmp/tmp.z5qMotTY5T ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Cp23CUUHCA +++ mktemp ++ local LAST_ERR=/tmp/tmp.Of4NI4A4Hk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Cp23CUUHCA ++ cat /tmp/tmp.Of4NI4A4Hk ++ rm /tmp/tmp.Cp23CUUHCA /tmp/tmp.Of4NI4A4Hk ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5JhRT0w6QC +++ mktemp ++ local LAST_ERR=/tmp/tmp.vKoNpVaUCf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.5JhRT0w6QC ++ cat /tmp/tmp.vKoNpVaUCf ++ rm /tmp/tmp.5JhRT0w6QC /tmp/tmp.vKoNpVaUCf ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qmzz1Mg86H +++ mktemp ++ local LAST_ERR=/tmp/tmp.aSs6pdYlwA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qmzz1Mg86H ++ cat /tmp/tmp.aSs6pdYlwA ++ rm /tmp/tmp.qmzz1Mg86H /tmp/tmp.aSs6pdYlwA ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.j1Z73WelgR +++ mktemp ++ local LAST_ERR=/tmp/tmp.nKO4abTd1A ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.j1Z73WelgR ++ cat /tmp/tmp.nKO4abTd1A ++ rm /tmp/tmp.j1Z73WelgR /tmp/tmp.nKO4abTd1A ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HHU1z8Y2Is +++ mktemp ++ local LAST_ERR=/tmp/tmp.vzOxMM8QZj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HHU1z8Y2Is ++ cat /tmp/tmp.vzOxMM8QZj ++ rm /tmp/tmp.HHU1z8Y2Is /tmp/tmp.vzOxMM8QZj ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.U8dYVD7NN2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.S9hYiYdT3Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.U8dYVD7NN2 ++ cat /tmp/tmp.S9hYiYdT3Y ++ rm /tmp/tmp.U8dYVD7NN2 /tmp/tmp.S9hYiYdT3Y ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RppDC6wcpQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.PIVCV9AlzI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RppDC6wcpQ ++ cat /tmp/tmp.PIVCV9AlzI ++ rm /tmp/tmp.RppDC6wcpQ /tmp/tmp.PIVCV9AlzI ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FI4U6fCV1S +++ mktemp ++ local LAST_ERR=/tmp/tmp.igCH5PjoYO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FI4U6fCV1S ++ cat /tmp/tmp.igCH5PjoYO ++ rm /tmp/tmp.FI4U6fCV1S /tmp/tmp.igCH5PjoYO ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.HeqW1Oa2TL +++ mktemp ++ local LAST_ERR=/tmp/tmp.j9Bkqz0vgC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.HeqW1Oa2TL ++ cat /tmp/tmp.j9Bkqz0vgC ++ rm /tmp/tmp.HeqW1Oa2TL /tmp/tmp.j9Bkqz0vgC ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1jdGnHXfpm +++ mktemp ++ local LAST_ERR=/tmp/tmp.8yiVZ4tXno ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1jdGnHXfpm ++ cat /tmp/tmp.8yiVZ4tXno ++ rm /tmp/tmp.1jdGnHXfpm /tmp/tmp.8yiVZ4tXno ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eKg3vYhBxU +++ mktemp ++ local LAST_ERR=/tmp/tmp.uRp8mxvssq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.eKg3vYhBxU ++ cat /tmp/tmp.uRp8mxvssq ++ rm /tmp/tmp.eKg3vYhBxU /tmp/tmp.uRp8mxvssq ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YP4p4Y7HJk +++ mktemp ++ local LAST_ERR=/tmp/tmp.gzyxvnWuDe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YP4p4Y7HJk ++ cat /tmp/tmp.gzyxvnWuDe ++ rm /tmp/tmp.YP4p4Y7HJk /tmp/tmp.gzyxvnWuDe ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.p4f1R3RgEl +++ mktemp ++ local LAST_ERR=/tmp/tmp.AV5DvDtY01 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.p4f1R3RgEl ++ cat /tmp/tmp.AV5DvDtY01 ++ rm /tmp/tmp.p4f1R3RgEl /tmp/tmp.AV5DvDtY01 ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gy3VT43qhI +++ mktemp ++ local LAST_ERR=/tmp/tmp.75cnui2k1k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gy3VT43qhI ++ cat /tmp/tmp.75cnui2k1k ++ rm /tmp/tmp.gy3VT43qhI /tmp/tmp.75cnui2k1k ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VlycBjbNmz +++ mktemp ++ local LAST_ERR=/tmp/tmp.AVoIdk309h ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VlycBjbNmz ++ cat /tmp/tmp.AVoIdk309h ++ rm /tmp/tmp.VlycBjbNmz /tmp/tmp.AVoIdk309h ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QZfVWgFOBA +++ mktemp ++ local LAST_ERR=/tmp/tmp.tVsRv8H6dt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QZfVWgFOBA ++ cat /tmp/tmp.tVsRv8H6dt ++ rm /tmp/tmp.QZfVWgFOBA /tmp/tmp.tVsRv8H6dt ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ra7BuhNJVc +++ mktemp ++ local LAST_ERR=/tmp/tmp.Pqml9JCYe0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ra7BuhNJVc ++ cat /tmp/tmp.Pqml9JCYe0 ++ rm /tmp/tmp.Ra7BuhNJVc /tmp/tmp.Pqml9JCYe0 ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 60 ']' + echo -n . 
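Where the target cluster's kubectl supports jsonpath conditions in kubectl wait (roughly v1.23 and later), the same readiness gate could plausibly be expressed declaratively instead of as a hand-rolled loop. This is an alternative sketch, not something the test suite does:

# hypothetical declarative equivalent of the polling loop above
kubectl wait psmdb/some-name \
    --for=jsonpath='{.status.state}'=ready \
    --timeout=600s
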
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QGc4Jl2tD0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.NNksyMZDVd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.QGc4Jl2tD0 ++ cat /tmp/tmp.NNksyMZDVd ++ rm /tmp/tmp.QGc4Jl2tD0 /tmp/tmp.NNksyMZDVd ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 19 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cuCOgciC7u +++ mktemp ++ local LAST_ERR=/tmp/tmp.dbzSBGgO6r ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cuCOgciC7u ++ cat /tmp/tmp.dbzSBGgO6r ++ rm /tmp/tmp.cuCOgciC7u /tmp/tmp.dbzSBGgO6r ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 20 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9AGWYfFcpV +++ mktemp ++ local LAST_ERR=/tmp/tmp.5KCgWYyFhU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9AGWYfFcpV ++ cat /tmp/tmp.5KCgWYyFhU ++ rm /tmp/tmp.9AGWYfFcpV /tmp/tmp.5KCgWYyFhU ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 21 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4zwZgQZxUD +++ mktemp ++ local LAST_ERR=/tmp/tmp.00ITHxeHWo ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4zwZgQZxUD ++ cat /tmp/tmp.00ITHxeHWo ++ rm /tmp/tmp.4zwZgQZxUD /tmp/tmp.00ITHxeHWo ++ return 0 + [[ paused == \r\e\a\d\y ]] + let retry+=1 + '[' 22 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OEhQV3lS09 +++ mktemp ++ local LAST_ERR=/tmp/tmp.oW8Sx7fLoN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.OEhQV3lS09 ++ cat /tmp/tmp.oW8Sx7fLoN ++ rm /tmp/tmp.OEhQV3lS09 /tmp/tmp.oW8Sx7fLoN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 23 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JuPWknn6hS +++ mktemp ++ local LAST_ERR=/tmp/tmp.FFdYUwuMUG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JuPWknn6hS ++ cat /tmp/tmp.FFdYUwuMUG ++ rm /tmp/tmp.JuPWknn6hS /tmp/tmp.FFdYUwuMUG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 24 -ge 60 ']' + echo -n . 
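For a sense of the budget behind those dots: the counter gives up at 60 iterations and each iteration sleeps 10 seconds, so the loop tolerates roughly ten minutes of not-ready states, on top of the short settle sleep the helper does before its first poll, before the run would fail.

# upper bound implied by the constants visible in the loop
echo $(( 60 * 10 ))   # 600 seconds, i.e. about 10 minutes
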
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.56BlP8fyxh +++ mktemp ++ local LAST_ERR=/tmp/tmp.ugeIDUpOLS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.56BlP8fyxh ++ cat /tmp/tmp.ugeIDUpOLS ++ rm /tmp/tmp.56BlP8fyxh /tmp/tmp.ugeIDUpOLS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 25 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fspVDJBegg +++ mktemp ++ local LAST_ERR=/tmp/tmp.3VEyRU7oR5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fspVDJBegg ++ cat /tmp/tmp.3VEyRU7oR5 ++ rm /tmp/tmp.fspVDJBegg /tmp/tmp.3VEyRU7oR5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 26 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wZY0A0v512 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VNSPRuEgDE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wZY0A0v512 ++ cat /tmp/tmp.VNSPRuEgDE ++ rm /tmp/tmp.wZY0A0v512 /tmp/tmp.VNSPRuEgDE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 27 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vpVoXENzhI +++ mktemp ++ local LAST_ERR=/tmp/tmp.GixXkPVMRe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vpVoXENzhI ++ cat /tmp/tmp.GixXkPVMRe ++ rm /tmp/tmp.vpVoXENzhI /tmp/tmp.GixXkPVMRe ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_kubectl statefulset/some-name-rs0 -sharding-disabled + local resource=statefulset/some-name-rs0 + local postfix=-sharding-disabled + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml + local new_result=/tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KABmYpn4oT ++ mktemp + local LAST_ERR=/tmp/tmp.SiI25Ul3R7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KABmYpn4oT + cat /tmp/tmp.SiI25Ul3R7 + rm /tmp/tmp.KABmYpn4oT /tmp/tmp.SiI25Ul3R7 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-disabled.yml /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-23T13:57:53+0000] compare_kubectl: statefulset/some-name-rs0 OK ++ kubectl_bin get sts -o yaml ++ yq '.items | length' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M8QIKKoVDy +++ mktemp ++ local LAST_ERR=/tmp/tmp.gY4Q9DW5nX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get sts -o yaml ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.M8QIKKoVDy ++ cat /tmp/tmp.gY4Q9DW5nX ++ rm /tmp/tmp.M8QIKKoVDy /tmp/tmp.gY4Q9DW5nX ++ return 0 + [[ 1 != 1 ]] + desc 'enabling sharding' + set +o xtrace ----------------------------------------------------------------------------------- enabling sharding 
----------------------------------------------------------------------------------- + kubectl_bin patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.MrsOM6A7Dg ++ mktemp + local LAST_ERR=/tmp/tmp.br3UGeTRlf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json '-p=[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MrsOM6A7Dg perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.br3UGeTRlf + rm /tmp/tmp.MrsOM6A7Dg /tmp/tmp.br3UGeTRlf + return 0 + sleep 10 + wait_cluster_consistency some-name 60 + local cluster_name=some-name + local wait_time=60 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oUT0nAFPpB +++ mktemp ++ local LAST_ERR=/tmp/tmp.iM7CYrogtT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oUT0nAFPpB ++ cat /tmp/tmp.iM7CYrogtT ++ rm /tmp/tmp.oUT0nAFPpB /tmp/tmp.iM7CYrogtT ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X9CTJBXEV8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pCXwHhduuv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X9CTJBXEV8 ++ cat /tmp/tmp.pCXwHhduuv ++ rm /tmp/tmp.X9CTJBXEV8 /tmp/tmp.pCXwHhduuv ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9POMtijw5b +++ mktemp ++ local LAST_ERR=/tmp/tmp.cenFwfIA6q ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9POMtijw5b ++ cat /tmp/tmp.cenFwfIA6q ++ rm /tmp/tmp.9POMtijw5b /tmp/tmp.cenFwfIA6q ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gI6Y2iWyUp +++ mktemp ++ local LAST_ERR=/tmp/tmp.HsTRI3GOUf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gI6Y2iWyUp ++ cat /tmp/tmp.HsTRI3GOUf ++ rm /tmp/tmp.gI6Y2iWyUp /tmp/tmp.HsTRI3GOUf ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 60 ']' + echo -n . 
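The wait in progress here was triggered by a one-line JSON patch turning spec.sharding.enabled back on; stripped of the wrapper noise, the operative command from the log is just:

# re-enable sharding on the PerconaServerMongoDB resource (taken from the trace above)
kubectl patch psmdb some-name --type=json \
    -p='[{"op": "replace", "path": "/spec/sharding/enabled", "value": true}]'

The reported state then walks back through stopping, error and initializing before settling on ready, consistent with the operator rebuilding the component StatefulSets for the now-sharded topology.
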
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1zvxxcAJ4X +++ mktemp ++ local LAST_ERR=/tmp/tmp.N38J7QgppQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1zvxxcAJ4X ++ cat /tmp/tmp.N38J7QgppQ ++ rm /tmp/tmp.1zvxxcAJ4X /tmp/tmp.N38J7QgppQ ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wfAnQVYU1s +++ mktemp ++ local LAST_ERR=/tmp/tmp.q8WmnWeUjM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wfAnQVYU1s ++ cat /tmp/tmp.q8WmnWeUjM ++ rm /tmp/tmp.wfAnQVYU1s /tmp/tmp.q8WmnWeUjM ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ii0PQb0g1e +++ mktemp ++ local LAST_ERR=/tmp/tmp.BskLNLdhvv ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ii0PQb0g1e ++ cat /tmp/tmp.BskLNLdhvv ++ rm /tmp/tmp.ii0PQb0g1e /tmp/tmp.BskLNLdhvv ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ept0leYiZS +++ mktemp ++ local LAST_ERR=/tmp/tmp.lbJpAB3Uk8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ept0leYiZS ++ cat /tmp/tmp.lbJpAB3Uk8 ++ rm /tmp/tmp.Ept0leYiZS /tmp/tmp.lbJpAB3Uk8 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uDFA0NDx39 +++ mktemp ++ local LAST_ERR=/tmp/tmp.obNsdWYE3R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uDFA0NDx39 ++ cat /tmp/tmp.obNsdWYE3R ++ rm /tmp/tmp.uDFA0NDx39 /tmp/tmp.obNsdWYE3R ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.12rYyzZHjY +++ mktemp ++ local LAST_ERR=/tmp/tmp.bhppPigRf2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.12rYyzZHjY ++ cat /tmp/tmp.bhppPigRf2 ++ rm /tmp/tmp.12rYyzZHjY /tmp/tmp.bhppPigRf2 ++ return 0 + [[ error == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 60 ']' + echo -n . 
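The compare_kubectl checks scattered through this test all follow one recipe: dump the live object as YAML, delete every volatile or environment-specific field with one long yq expression, mask the namespace, then diff -u the result against an expected manifest in the repo. A trimmed sketch that keeps only a few representative deletions from the full filter quoted in the log; file paths here are shortened for illustration:

# normalize the live StatefulSet and diff it against the expected manifest
kubectl get -o yaml statefulset/some-name-rs0 \
  | yq eval '
      del(.metadata.managedFields) |
      del(.metadata.resourceVersion) |
      del(.. | select(has("uid")).uid) |
      del(.. | select(has("creationTimestamp")).creationTimestamp) |
      del(.status) |
      (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE")
    ' - > /tmp/statefulset_some-name-rs0.yml
diff -u e2e-tests/expose-sharded/compare/statefulset_some-name-rs0.yml \
    /tmp/statefulset_some-name-rs0.yml
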
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tRwelKMomJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.6fatt2uJqJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tRwelKMomJ ++ cat /tmp/tmp.6fatt2uJqJ ++ rm /tmp/tmp.tRwelKMomJ /tmp/tmp.6fatt2uJqJ ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 11 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VsNizI98CW +++ mktemp ++ local LAST_ERR=/tmp/tmp.zADR5SogM0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.VsNizI98CW ++ cat /tmp/tmp.zADR5SogM0 ++ rm /tmp/tmp.VsNizI98CW /tmp/tmp.zADR5SogM0 ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 12 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MeuDGStwxZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Q3ysM49qZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MeuDGStwxZ ++ cat /tmp/tmp.3Q3ysM49qZ ++ rm /tmp/tmp.MeuDGStwxZ /tmp/tmp.3Q3ysM49qZ ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 13 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w5XlFcguz6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.0jE2ZspwWK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w5XlFcguz6 ++ cat /tmp/tmp.0jE2ZspwWK ++ rm /tmp/tmp.w5XlFcguz6 /tmp/tmp.0jE2ZspwWK ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 14 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sKEaUTsDcJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q12QpCkR3f ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sKEaUTsDcJ ++ cat /tmp/tmp.Q12QpCkR3f ++ rm /tmp/tmp.sKEaUTsDcJ /tmp/tmp.Q12QpCkR3f ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 15 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ykZDnwjIqa +++ mktemp ++ local LAST_ERR=/tmp/tmp.rlISwMdxPr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ykZDnwjIqa ++ cat /tmp/tmp.rlISwMdxPr ++ rm /tmp/tmp.ykZDnwjIqa /tmp/tmp.rlISwMdxPr ++ return 0 + [[ stopping == \r\e\a\d\y ]] + let retry+=1 + '[' 16 -ge 60 ']' + echo -n . 
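The version_gt 1.22 guard seen around the yq cleanups is a float comparison delegated to bc: the detected KUBE_VERSION (1.31 on this cluster) is interpolated into an expression, and bc prints 1 when it holds. A standalone sketch of the same idea; version_ge is an illustrative name, not the suite's function:

# succeed when the detected server version is at or above the threshold
version_ge() {
    local have=$1 want=$2
    [ "$(echo "$have >= $want" | bc -l)" -eq 1 ]
}
version_ge 1.31 1.22 && echo "server is new enough"
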
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BiRreFJlFH +++ mktemp ++ local LAST_ERR=/tmp/tmp.g3Qq4sShdr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BiRreFJlFH ++ cat /tmp/tmp.g3Qq4sShdr ++ rm /tmp/tmp.BiRreFJlFH /tmp/tmp.g3Qq4sShdr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 17 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jtV4ZcHOLL +++ mktemp ++ local LAST_ERR=/tmp/tmp.QrDAI4JgmN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jtV4ZcHOLL ++ cat /tmp/tmp.QrDAI4JgmN ++ rm /tmp/tmp.jtV4ZcHOLL /tmp/tmp.QrDAI4JgmN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 18 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WYJJJeihgg +++ mktemp ++ local LAST_ERR=/tmp/tmp.hwsMhAcAMG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WYJJJeihgg ++ cat /tmp/tmp.hwsMhAcAMG ++ rm /tmp/tmp.WYJJJeihgg /tmp/tmp.hwsMhAcAMG ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 19 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wTVtORaPrA +++ mktemp ++ local LAST_ERR=/tmp/tmp.qIlqKGuV0a ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wTVtORaPrA ++ cat /tmp/tmp.qIlqKGuV0a ++ rm /tmp/tmp.wTVtORaPrA /tmp/tmp.qIlqKGuV0a ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 20 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9WPDLvC1p1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.aL7SYKKdri ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9WPDLvC1p1 ++ cat /tmp/tmp.aL7SYKKdri ++ rm /tmp/tmp.9WPDLvC1p1 /tmp/tmp.aL7SYKKdri ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 21 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mKXRrM6NMW +++ mktemp ++ local LAST_ERR=/tmp/tmp.FOM1jQOLzH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mKXRrM6NMW ++ cat /tmp/tmp.FOM1jQOLzH ++ rm /tmp/tmp.mKXRrM6NMW /tmp/tmp.FOM1jQOLzH ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 22 -ge 60 ']' + echo -n . 
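Two tiny helpers shape the human-readable lines between the traces: log prefixes a message with a UTC timestamp (the "[2025-12-23T...]" lines) and desc frames a message between dashed rules. Their bodies are not shown in the trace, so these are plausible reconstructions rather than the suite's exact definitions:

# timestamped status line, e.g. "[2025-12-23T13:57:53+0000] compare_kubectl: ... OK"
log() {
    echo "[$(date -u +'%Y-%m-%dT%H:%M:%S%z')] $*"
}

# dashed banner around a section description (rule width is approximate)
desc() {
    local rule
    rule=$(printf -- '-%.0s' {1..85})
    echo "$rule"
    echo "$*"
    echo "$rule"
}
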
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lFIXOpBL4l +++ mktemp ++ local LAST_ERR=/tmp/tmp.rOgSMSTv8W ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lFIXOpBL4l ++ cat /tmp/tmp.rOgSMSTv8W ++ rm /tmp/tmp.lFIXOpBL4l /tmp/tmp.rOgSMSTv8W ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 23 -ge 60 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dPIez9nmPx +++ mktemp ++ local LAST_ERR=/tmp/tmp.X2DHjBICGb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.dPIez9nmPx ++ cat /tmp/tmp.X2DHjBICGb ++ rm /tmp/tmp.dPIez9nmPx /tmp/tmp.X2DHjBICGb ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + compare_kubectl statefulset/some-name-rs0 -sharding-enabled + local resource=statefulset/some-name-rs0 + local postfix=-sharding-enabled + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml + local new_result=/tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. 
| select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.oOiJE3HpSC ++ mktemp + local LAST_ERR=/tmp/tmp.7XrMgMVqZ4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oOiJE3HpSC + cat /tmp/tmp.7XrMgMVqZ4 + rm /tmp/tmp.oOiJE3HpSC /tmp/tmp.7XrMgMVqZ4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-rs0-sharding-enabled.yml /tmp/tmp.kMkliBXMM6/statefulset_some-name-rs0.yml + log 'compare_kubectl: statefulset/some-name-rs0 OK' + set +o xtrace [2025-12-23T14:02:38+0000] compare_kubectl: statefulset/some-name-rs0 OK + compare_kubectl statefulset/some-name-cfg + local resource=statefulset/some-name-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml + local new_result=/tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.jsh9ND3LRe ++ mktemp + local LAST_ERR=/tmp/tmp.VctBL1M6mk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jsh9ND3LRe + cat /tmp/tmp.VctBL1M6mk + rm /tmp/tmp.jsh9ND3LRe /tmp/tmp.VctBL1M6mk + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-cfg.yml /tmp/tmp.kMkliBXMM6/statefulset_some-name-cfg.yml + log 'compare_kubectl: statefulset/some-name-cfg OK' + set +o xtrace [2025-12-23T14:02:39+0000] compare_kubectl: statefulset/some-name-cfg OK + compare_kubectl statefulset/some-name-mongos '' + local resource=statefulset/some-name-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml + local new_result=/tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/some-name-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. 
| select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("expose-sharded-9120", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.hUm0ve2wzD ++ mktemp + local LAST_ERR=/tmp/tmp.L8nZhqrkvR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/some-name-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hUm0ve2wzD + cat /tmp/tmp.L8nZhqrkvR + rm /tmp/tmp.hUm0ve2wzD /tmp/tmp.L8nZhqrkvR + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + version_gt 1.22 ++ echo '1.31 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/statefulset_some-name-mongos.yml /tmp/tmp.kMkliBXMM6/statefulset_some-name-mongos.yml + log 'compare_kubectl: statefulset/some-name-mongos OK' + set +o xtrace [2025-12-23T14:02:41+0000] compare_kubectl: statefulset/some-name-mongos OK + desc 'write data, read from all' + set +o xtrace ----------------------------------------------------------------------------------- write data, read from all ----------------------------------------------------------------------------------- + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@some-name-mongos.expose-sharded-9120 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + 
suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x70Z8ayuky +++ mktemp ++ local LAST_ERR=/tmp/tmp.91UvuKD5l8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x70Z8ayuky ++ cat /tmp/tmp.91UvuKD5l8 ++ rm /tmp/tmp.x70Z8ayuky /tmp/tmp.91UvuKD5l8 ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.NaGRcETD8E ++ mktemp + local LAST_ERR=/tmp/tmp.aAwV5N4FDd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NaGRcETD8E Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("1f48c779-08e5-471e-a8fc-540d25a30cf9") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.aAwV5N4FDd + rm /tmp/tmp.NaGRcETD8E /tmp/tmp.aAwV5N4FDd + return 0 + run_mongo 'db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-9120 mongodb + local 'command=db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})' + local uri=userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-9120 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ilf08JiRwf +++ mktemp ++ local LAST_ERR=/tmp/tmp.MKd2bkUdqb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Ilf08JiRwf ++ cat /tmp/tmp.MKd2bkUdqb ++ rm /tmp/tmp.Ilf08JiRwf /tmp/tmp.MKd2bkUdqb ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Kcp820hsWp ++ mktemp + local 
LAST_ERR=/tmp/tmp.LaFTjndIRv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''db.createUser({user: "myApp", pwd: "myPass", roles: [{ db: "myApp", role: "readWrite" }]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Kcp820hsWp Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0&ssl=false Implicit session: session { "id" : UUID("d36edb31-c6c7-4406-ad50-aa657ad954c7") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.LaFTjndIRv + rm /tmp/tmp.Kcp820hsWp /tmp/tmp.LaFTjndIRv + return 0 + run_mongos 'sh.enableSharding("myApp","rs0")' clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-9120 + local 'command=sh.enableSharding("myApp","rs0")' + local uri=clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.doGowO9ZmZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.tRIXskksp9 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.doGowO9ZmZ ++ cat /tmp/tmp.tRIXskksp9 ++ rm /tmp/tmp.doGowO9ZmZ /tmp/tmp.tRIXskksp9 ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.b77KaMUoIU ++ mktemp + local LAST_ERR=/tmp/tmp.o71rXWxdK3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''sh.enableSharding("myApp","rs0")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b77KaMUoIU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("375533d6-f170-416b-a359-987dd04cf866") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1766498571, 8), "signature" : { "hash" : BinData(0,"B2aTbGQsus1s6Z32RYkOKX0ufyk="), "keyId" : NumberLong("7587050343880458264") } }, "operationTime" : Timestamp(1766498571, 5) } bye + cat /tmp/tmp.o71rXWxdK3 + rm 
/tmp/tmp.b77KaMUoIU /tmp/tmp.o71rXWxdK3 + return 0 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@some-name-mongos.expose-sharded-9120 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TfXrtw2kAW +++ mktemp ++ local LAST_ERR=/tmp/tmp.91sySeKHAz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TfXrtw2kAW ++ cat /tmp/tmp.91sySeKHAz ++ rm /tmp/tmp.TfXrtw2kAW /tmp/tmp.91sySeKHAz ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.wP7ckhGEgD ++ mktemp + local LAST_ERR=/tmp/tmp.tdELmthWgZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wP7ckhGEgD Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("c0231cde-0715-4dbf-aaa6-3ef2d5f37e4d") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tdELmthWgZ + rm /tmp/tmp.wP7ckhGEgD /tmp/tmp.tdELmthWgZ + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-9120 + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local postfix= + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-23T14:02:54+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-9120 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ 
-z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZIZZONjSdX +++ mktemp ++ local LAST_ERR=/tmp/tmp.kx4h5a2VZd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZIZZONjSdX ++ cat /tmp/tmp.kx4h5a2VZd ++ rm /tmp/tmp.ZIZZONjSdX /tmp/tmp.kx4h5a2VZd ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Ok713vZYPx ++ mktemp + local LAST_ERR=/tmp/tmp.zAcUC9Qw2Y + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ok713vZYPx + cat /tmp/tmp.zAcUC9Qw2Y + rm /tmp/tmp.Ok713vZYPx /tmp/tmp.zAcUC9Qw2Y + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find.json /tmp/tmp.kMkliBXMM6/find + desc 'Unexposed -> Exposed, ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- Unexposed -> Exposed, ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.l5ATSvIM6d ++ mktemp + local LAST_ERR=/tmp/tmp.nXzt2rTOmt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l5ATSvIM6d perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.nXzt2rTOmt + rm /tmp/tmp.l5ATSvIM6d /tmp/tmp.nXzt2rTOmt + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for 
pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9nEIQuQEpk +++ mktemp ++ local LAST_ERR=/tmp/tmp.kVQ7UKz2sI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9nEIQuQEpk ++ cat /tmp/tmp.kVQ7UKz2sI ++ rm /tmp/tmp.9nEIQuQEpk /tmp/tmp.kVQ7UKz2sI ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GSVxzFBepN +++ mktemp ++ local LAST_ERR=/tmp/tmp.AzVfW5vfgg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GSVxzFBepN ++ cat /tmp/tmp.AzVfW5vfgg ++ rm /tmp/tmp.GSVxzFBepN /tmp/tmp.AzVfW5vfgg ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w1l4sSoDxN +++ mktemp ++ local LAST_ERR=/tmp/tmp.ilqEEm6ifL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w1l4sSoDxN ++ cat /tmp/tmp.ilqEEm6ifL ++ rm /tmp/tmp.w1l4sSoDxN /tmp/tmp.ilqEEm6ifL ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E14EBXoGKF +++ mktemp ++ local LAST_ERR=/tmp/tmp.VoIZpJC0he ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.E14EBXoGKF ++ cat /tmp/tmp.VoIZpJC0he ++ rm /tmp/tmp.E14EBXoGKF /tmp/tmp.VoIZpJC0he ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.h3RlEVeN54 +++ mktemp ++ local LAST_ERR=/tmp/tmp.RpoWAcyqPr ++ local exit_status=0 ++ local 
timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.h3RlEVeN54 ++ cat /tmp/tmp.RpoWAcyqPr ++ rm /tmp/tmp.h3RlEVeN54 /tmp/tmp.RpoWAcyqPr ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rX4SZbWoG1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.SQThkKjvw0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rX4SZbWoG1 ++ cat /tmp/tmp.SQThkKjvw0 ++ rm /tmp/tmp.rX4SZbWoG1 /tmp/tmp.SQThkKjvw0 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yPvceOvFdD +++ mktemp ++ local LAST_ERR=/tmp/tmp.RWqDBKf8GO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yPvceOvFdD ++ cat /tmp/tmp.RWqDBKf8GO ++ rm /tmp/tmp.yPvceOvFdD /tmp/tmp.RWqDBKf8GO ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z5jZhCaSmC +++ mktemp ++ local LAST_ERR=/tmp/tmp.RMTUXDn9oX ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z5jZhCaSmC ++ cat /tmp/tmp.RMTUXDn9oX ++ rm /tmp/tmp.Z5jZhCaSmC /tmp/tmp.RMTUXDn9oX ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hj8ikQ2AEM +++ mktemp ++ local LAST_ERR=/tmp/tmp.DGLV0aDEDm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hj8ikQ2AEM ++ cat /tmp/tmp.DGLV0aDEDm ++ rm /tmp/tmp.hj8ikQ2AEM /tmp/tmp.DGLV0aDEDm ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + 
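
# A minimal sketch (not output from this run) of the per-replset flag checks traced above:
# before waiting for extra pods, the test asks the psmdb custom resource whether arbiter,
# nonvoting, or hidden members are enabled for a replset, using the same jsonpath shape
# seen in this log. CLUSTER and RS are illustrative variable names.
CLUSTER=some-name
RS=rs0
for flag in arbiter nonvoting hidden; do
  kubectl get psmdb "$CLUSTER" \
    -o "jsonpath={.spec.replsets[?(@.name==\"$RS\")].${flag}.enabled}"
  echo
done
# An empty result, as in this run, means that member type is not configured.
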
wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.481huw9ufB +++ mktemp ++ local LAST_ERR=/tmp/tmp.ejaTMU5Wz1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.481huw9ufB ++ cat /tmp/tmp.ejaTMU5Wz1 ++ rm /tmp/tmp.481huw9ufB /tmp/tmp.ejaTMU5Wz1 ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100501 })' myApp:myPass@some-name-mongos.expose-sharded-9120 + local 'command=use myApp\n db.test.insert({ x: 100501 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rGmujtd2RH +++ mktemp ++ local LAST_ERR=/tmp/tmp.lkU4xxQAbb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rGmujtd2RH ++ cat /tmp/tmp.lkU4xxQAbb ++ rm /tmp/tmp.rGmujtd2RH /tmp/tmp.lkU4xxQAbb ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.5U4ewBDSLU ++ mktemp + local LAST_ERR=/tmp/tmp.EMTT2qGUOL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100501 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5U4ewBDSLU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("57510204-8de3-4549-a358-1782195d7447") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.EMTT2qGUOL + rm /tmp/tmp.5U4ewBDSLU /tmp/tmp.EMTT2qGUOL + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-9120 -2nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-23T14:04:07+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-9120 mongodb '' '' 27017 + 
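
# Illustrative sketch of the run_mongos pattern traced above (not a recorded command):
# the test locates the psmdb-client pod by its name=psmdb-client label and pipes a
# mongo-shell script into `mongo` against the mongos service URI. CMD and URI are
# illustrative variables; the selector and URI shape are the ones visible in this log.
CMD='use myApp\n db.test.find()'
URI='myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017'
client_pod=$(kubectl get pods --selector=name=psmdb-client \
  -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client_pod" -- bash -c "printf '$CMD\n' | mongo mongodb://$URI/admin"
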
local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wnv98in9wk +++ mktemp ++ local LAST_ERR=/tmp/tmp.rZlmtvxoQc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wnv98in9wk ++ cat /tmp/tmp.rZlmtvxoQc ++ rm /tmp/tmp.wnv98in9wk /tmp/tmp.rZlmtvxoQc ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.p0aWD7Fxtr ++ mktemp + local LAST_ERR=/tmp/tmp.NE9oNd6fbD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.p0aWD7Fxtr + cat /tmp/tmp.NE9oNd6fbD + rm /tmp/tmp.p0aWD7Fxtr /tmp/tmp.NE9oNd6fbD + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.kMkliBXMM6/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:04:10+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 == *cfg* ]] + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 
'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.YSutafuWvy +++ mktemp ++ local LAST_ERR=/tmp/tmp.LsFuoNdNlr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YSutafuWvy ++ cat /tmp/tmp.LsFuoNdNlr ++ rm /tmp/tmp.YSutafuWvy /tmp/tmp.LsFuoNdNlr ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.0ifztDmSYk ++ mktemp + local LAST_ERR=/tmp/tmp.J2MCQmNbfw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0ifztDmSYk + cat /tmp/tmp.J2MCQmNbfw + rm /tmp/tmp.0ifztDmSYk /tmp/tmp.J2MCQmNbfw + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.kMkliBXMM6/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:04:12+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 == *cfg* ]] + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.9AE1e0KZya +++ mktemp ++ local LAST_ERR=/tmp/tmp.se4W78nTQQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9AE1e0KZya ++ cat /tmp/tmp.se4W78nTQQ ++ rm /tmp/tmp.9AE1e0KZya /tmp/tmp.se4W78nTQQ ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- 
bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.g4PIobXtlK ++ mktemp + local LAST_ERR=/tmp/tmp.02dkxnq3vA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g4PIobXtlK + cat /tmp/tmp.02dkxnq3vA + rm /tmp/tmp.g4PIobXtlK /tmp/tmp.02dkxnq3vA + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.kMkliBXMM6/find-2nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 -2nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + local postfix=-2nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:04:14+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 == *cfg* ]] + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.9xIEhAzue6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.UGrCOwv7Lg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9xIEhAzue6 ++ cat /tmp/tmp.UGrCOwv7Lg ++ rm /tmp/tmp.9xIEhAzue6 /tmp/tmp.UGrCOwv7Lg ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DKFBDdIXyp ++ mktemp + local LAST_ERR=/tmp/tmp.tBrlFiYiwO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 
']' + break + cat /tmp/tmp.DKFBDdIXyp + cat /tmp/tmp.tBrlFiYiwO + rm /tmp/tmp.DKFBDdIXyp /tmp/tmp.tBrlFiYiwO + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-2nd.json /tmp/tmp.kMkliBXMM6/find-2nd + compare_mongo_config some-name expose-sharded-9120 + cluster=some-name + namespace=expose-sharded-9120 + enable_expose=true + desc 'Compare mongo config' + set +o xtrace ----------------------------------------------------------------------------------- Compare mongo config ----------------------------------------------------------------------------------- + cfg_0_endpoint=some-name-cfg-0.some-name-cfg.expose-sharded-9120.svc.cluster.local ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-9120 ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-9120 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local replica_set=rs0 ++ [[ clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-9120 == *cfg* ]] ++ replica_set=cfg ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp egrep: warning: egrep is obsolescent; using grep -E +++ local LAST_OUT=/tmp/tmp.4s902rX38I ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kQ8M0QfHuu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4s902rX38I +++ cat /tmp/tmp.kQ8M0QfHuu +++ rm /tmp/tmp.4s902rX38I /tmp/tmp.kQ8M0QfHuu +++ return 0 ++ local client_container=psmdb-client-696897d69b-lr5sx ++ kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.cpRJ4pun84 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HYmZEpnjSy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-cfg-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=cfg ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.cpRJ4pun84 ++ cat /tmp/tmp.HYmZEpnjSy ++ rm /tmp/tmp.cpRJ4pun84 /tmp/tmp.HYmZEpnjSy ++ return 0 + cfg_0_endpoint_actual=some-name-cfg-0.some-name-cfg.expose-sharded-9120.svc.cluster.local:27017 + 
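
# Condensed sketch of the compare_mongo_config lookup traced above and below (not output
# from this run): it resolves the host a replica set advertises for a given pod by scanning
# rs.conf().members for a matching tags.podName, over the mongodb+srv URI used in this log.
client_pod=$(kubectl get pods --selector=name=psmdb-client \
  -o 'jsonpath={.items[].metadata.name}')
kubectl exec "$client_pod" -- bash -c \
  'printf "rs.conf().members.forEach(function(m){ if (m.tags.podName == \"some-name-cfg-0\") print(m.host) })\n" | mongo "mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-cfg.expose-sharded-9120.svc.cluster.local/admin?ssl=false&replicaSet=cfg"'
# The test then asserts the printed host matches the expected
# <pod>.<service>.<namespace>.svc.cluster.local:27017 endpoint.
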
rs0_0_endpoint=some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local ++ run_mongo 'var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-9120 ++ local 'command=var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })' ++ local uri=clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-9120 ++ local driver=mongodb+srv ++ local suffix=.svc.cluster.local ++ local mongo_flag= ++ local replica_set=rs0 ++ [[ clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-9120 == *cfg* ]] ++ egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' +++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++++ mktemp egrep: warning: egrep is obsolescent; using grep -E +++ local LAST_OUT=/tmp/tmp.xA92ZtUMGE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Zd0QdwlQIn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xA92ZtUMGE +++ cat /tmp/tmp.Zd0QdwlQIn +++ rm /tmp/tmp.xA92ZtUMGE /tmp/tmp.Zd0QdwlQIn +++ return 0 ++ local client_container=psmdb-client-696897d69b-lr5sx ++ kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' +++ mktemp ++ local LAST_OUT=/tmp/tmp.T8m4qHdhyk +++ mktemp ++ local LAST_ERR=/tmp/tmp.9xEL9CwX9C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''var host;var x=0;rs.conf().members.forEach(function(d){ if(d.tags.podName=="some-name-rs0-0"){ host=rs.conf().members[x].host;print(host)};x=x+1; })\n'\'' | mongo mongodb+srv://clusterAdmin:clusterAdmin123456@some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T8m4qHdhyk ++ cat /tmp/tmp.9xEL9CwX9C ++ rm /tmp/tmp.T8m4qHdhyk /tmp/tmp.9xEL9CwX9C ++ return 0 + rs0_0_endpoint_actual=some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local:27017 + [[ some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\r\s\0\-\0\.\s\o\m\e\-\n\a\m\e\-\r\s\0\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\9\1\2\0\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + [[ some-name-cfg-0.some-name-cfg.expose-sharded-9120.svc.cluster.local:27017 != \s\o\m\e\-\n\a\m\e\-\c\f\g\-\0\.\s\o\m\e\-\n\a\m\e\-\c\f\g\.\e\x\p\o\s\e\-\s\h\a\r\d\e\d\-\9\1\2\0\.\s\v\c\.\c\l\u\s\t\e\r\.\l\o\c\a\l\:\2\7\0\1\7 ]] + desc 'Exposed, ClusterIP -> LoadBalancer' + set +o xtrace ----------------------------------------------------------------------------------- Exposed, ClusterIP -> LoadBalancer ----------------------------------------------------------------------------------- + 
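
# Sketch of the expose-type switch performed by expose_cluster below (and earlier for
# ClusterIP), assuming the same JSON-patch paths shown in this log; it flips the Service
# type for the data replset, mongos, and config replset in one patch against the psmdb
# custom resource. EXPOSE_TYPE is an illustrative variable, not part of the test suite.
EXPOSE_TYPE=LoadBalancer
kubectl patch psmdb some-name --type=json --patch '[
  { "op": "replace", "path": "/spec/replsets/0/expose",
    "value": { "enabled": true, "type": "'"$EXPOSE_TYPE"'" } },
  { "op": "replace", "path": "/spec/sharding/mongos/expose",
    "value": { "type": "'"$EXPOSE_TYPE"'" } },
  { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose",
    "value": { "enabled": true, "type": "'"$EXPOSE_TYPE"'" } }
]'
# The operator then reconciles the services and the test waits for the pods and the
# cluster status to come back ready before re-running its reads and writes.
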
expose_cluster LoadBalancer + expose_type=LoadBalancer + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.YmFkAtAGv6 ++ mktemp + local LAST_ERR=/tmp/tmp.ZkCUaUyjoq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "LoadBalancer" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "LoadBalancer" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YmFkAtAGv6 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.ZkCUaUyjoq + rm /tmp/tmp.YmFkAtAGv6 /tmp/tmp.ZkCUaUyjoq + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6d0WdEDv7Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.5UkNihIlzn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6d0WdEDv7Q ++ cat /tmp/tmp.5UkNihIlzn ++ rm /tmp/tmp.6d0WdEDv7Q /tmp/tmp.5UkNihIlzn ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MXFwqCDOGx +++ mktemp ++ local LAST_ERR=/tmp/tmp.KgptDZ621Y ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MXFwqCDOGx ++ cat /tmp/tmp.KgptDZ621Y ++ rm /tmp/tmp.MXFwqCDOGx /tmp/tmp.KgptDZ621Y ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vdQLb1Ldb0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.r2iPIBkLQE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 
0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vdQLb1Ldb0 ++ cat /tmp/tmp.r2iPIBkLQE ++ rm /tmp/tmp.vdQLb1Ldb0 /tmp/tmp.r2iPIBkLQE ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3tkDDUbPvl +++ mktemp ++ local LAST_ERR=/tmp/tmp.v8wi6ox13K ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3tkDDUbPvl ++ cat /tmp/tmp.v8wi6ox13K ++ rm /tmp/tmp.3tkDDUbPvl /tmp/tmp.v8wi6ox13K ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YHo83VBm67 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XEuky4v6Ov ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YHo83VBm67 ++ cat /tmp/tmp.XEuky4v6Ov ++ rm /tmp/tmp.YHo83VBm67 /tmp/tmp.XEuky4v6Ov ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hI7gashlFR +++ mktemp ++ local LAST_ERR=/tmp/tmp.ooYk8ccAea ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.hI7gashlFR ++ cat /tmp/tmp.ooYk8ccAea ++ rm /tmp/tmp.hI7gashlFR /tmp/tmp.ooYk8ccAea ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1Fo005GsV5 +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.5qYKyOJbKV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1Fo005GsV5 ++ cat /tmp/tmp.5qYKyOJbKV ++ rm /tmp/tmp.1Fo005GsV5 /tmp/tmp.5qYKyOJbKV ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WY0fvOYn9c +++ mktemp ++ local LAST_ERR=/tmp/tmp.pS8pbetxSP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WY0fvOYn9c ++ cat /tmp/tmp.pS8pbetxSP ++ rm /tmp/tmp.WY0fvOYn9c /tmp/tmp.pS8pbetxSP ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.svYJSd1alG +++ mktemp ++ local LAST_ERR=/tmp/tmp.ps9dP9Khrs ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.svYJSd1alG ++ cat /tmp/tmp.ps9dP9Khrs ++ rm /tmp/tmp.svYJSd1alG /tmp/tmp.ps9dP9Khrs ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XJa2DmnvMK +++ mktemp ++ local LAST_ERR=/tmp/tmp.qBp3PpG3ls ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XJa2DmnvMK ++ cat /tmp/tmp.qBp3PpG3ls ++ rm /tmp/tmp.XJa2DmnvMK /tmp/tmp.qBp3PpG3ls ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100502 })' myApp:myPass@some-name-mongos.expose-sharded-9120 + local 'command=use myApp\n db.test.insert({ x: 100502 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.L9sMAM34Lq +++ mktemp ++ local LAST_ERR=/tmp/tmp.5UaFiymmzt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.L9sMAM34Lq ++ cat /tmp/tmp.5UaFiymmzt ++ rm /tmp/tmp.L9sMAM34Lq /tmp/tmp.5UaFiymmzt ++ return 0 + local 
client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.UnKY65j3gU ++ mktemp + local LAST_ERR=/tmp/tmp.aXGSbnLfuc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100502 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UnKY65j3gU Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("86536f9c-2c3c-4567-bce3-61cb16bc11c9") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.aXGSbnLfuc + rm /tmp/tmp.UnKY65j3gU /tmp/tmp.aXGSbnLfuc + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-9120 -3nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-23T14:05:29+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-9120 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' egrep: warning: egrep is obsolescent; using grep -E ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3ua1mj6Id0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fjwvGXDpj5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3ua1mj6Id0 ++ cat /tmp/tmp.fjwvGXDpj5 ++ rm /tmp/tmp.3ua1mj6Id0 /tmp/tmp.fjwvGXDpj5 ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.UpddfEFWsO ++ mktemp + local LAST_ERR=/tmp/tmp.6EM6MDuzpp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec 
psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UpddfEFWsO + cat /tmp/tmp.6EM6MDuzpp + rm /tmp/tmp.UpddfEFWsO /tmp/tmp.6EM6MDuzpp + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.kMkliBXMM6/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:05:31+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 mongodb '' '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZwoT91ynfC +++ mktemp ++ local LAST_ERR=/tmp/tmp.gdrKpYeBzw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZwoT91ynfC ++ cat /tmp/tmp.gdrKpYeBzw ++ rm /tmp/tmp.ZwoT91ynfC /tmp/tmp.gdrKpYeBzw ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.PapQjbLVtr ++ mktemp + local LAST_ERR=/tmp/tmp.oRn7ne5hhV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PapQjbLVtr + cat /tmp/tmp.oRn7ne5hhV + rm /tmp/tmp.PapQjbLVtr /tmp/tmp.oRn7ne5hhV + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.kMkliBXMM6/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 -3nd + local command=find + local 
uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:05:34+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.ZviMnnefdK +++ mktemp ++ local LAST_ERR=/tmp/tmp.nZAKdYa0nw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZviMnnefdK ++ cat /tmp/tmp.nZAKdYa0nw ++ rm /tmp/tmp.ZviMnnefdK /tmp/tmp.nZAKdYa0nw ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.U42TLrrfvm ++ mktemp + local LAST_ERR=/tmp/tmp.X5JWtIdWMo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.U42TLrrfvm + cat /tmp/tmp.X5JWtIdWMo + rm /tmp/tmp.U42TLrrfvm /tmp/tmp.X5JWtIdWMo + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.kMkliBXMM6/find-3nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 -3nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + local postfix=-3nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:05:37+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local 
uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.imDD5gvYU5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6ulxGpTUsY ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.imDD5gvYU5 ++ cat /tmp/tmp.6ulxGpTUsY ++ rm /tmp/tmp.imDD5gvYU5 /tmp/tmp.6ulxGpTUsY ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.DxjUbXLFWR ++ mktemp + local LAST_ERR=/tmp/tmp.KJIQ9QLRDu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DxjUbXLFWR + cat /tmp/tmp.KJIQ9QLRDu + rm /tmp/tmp.DxjUbXLFWR /tmp/tmp.KJIQ9QLRDu + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-3nd.json /tmp/tmp.kMkliBXMM6/find-3nd + sleep 60 + desc 'Pause Exposed cluster (LoadBalancer)' + set +o xtrace ----------------------------------------------------------------------------------- Pause Exposed cluster (LoadBalancer) ----------------------------------------------------------------------------------- + stop_cluster some-name + local cluster_name=some-name + local max_wait_time=120 + local passed_time=0 + local sleep_time=1 + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.fJp2ynkRBk ++ mktemp + local LAST_ERR=/tmp/tmp.6f8SL5lF83 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fJp2ynkRBk perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.6f8SL5lF83 + rm /tmp/tmp.fJp2ynkRBk /tmp/tmp.6f8SL5lF83 + return 0 + set +x Waiting for cluster stop...............Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found 
.Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found .Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found Error from server (NotFound): deployments.apps "some-name-mongos" not found + start_cluster some-name + local cluster_name=some-name + kubectl_bin patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' ++ mktemp + local LAST_OUT=/tmp/tmp.7rpAXidY9W ++ mktemp + local LAST_ERR=/tmp/tmp.rLuCaTMcoQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type json '-p=[{"op":"add","path":"/spec/pause","value":false}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat 
/tmp/tmp.7rpAXidY9W perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.rLuCaTMcoQ + rm /tmp/tmp.7rpAXidY9W /tmp/tmp.rLuCaTMcoQ + return 0 + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FyAtvG1KK8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.xIO49Eh0rb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FyAtvG1KK8 ++ cat /tmp/tmp.xIO49Eh0rb ++ rm /tmp/tmp.FyAtvG1KK8 /tmp/tmp.xIO49Eh0rb ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 1 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BAcynb50Di +++ mktemp ++ local LAST_ERR=/tmp/tmp.6xjOXNGVcN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.BAcynb50Di ++ cat /tmp/tmp.6xjOXNGVcN ++ rm /tmp/tmp.BAcynb50Di /tmp/tmp.6xjOXNGVcN ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 2 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3HrEuy0gc5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HwVCskPWgS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3HrEuy0gc5 ++ cat /tmp/tmp.HwVCskPWgS ++ rm /tmp/tmp.3HrEuy0gc5 /tmp/tmp.HwVCskPWgS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 3 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rhms4Kd9Hm +++ mktemp ++ local LAST_ERR=/tmp/tmp.2HHnZzfwpq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Rhms4Kd9Hm ++ cat /tmp/tmp.2HHnZzfwpq ++ rm /tmp/tmp.Rhms4Kd9Hm /tmp/tmp.2HHnZzfwpq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 4 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7I9z6Lmx35 +++ mktemp ++ local LAST_ERR=/tmp/tmp.R03DfTOrwJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7I9z6Lmx35 ++ cat /tmp/tmp.R03DfTOrwJ ++ rm /tmp/tmp.7I9z6Lmx35 /tmp/tmp.R03DfTOrwJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 5 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YYAAOp9UOe +++ mktemp ++ local LAST_ERR=/tmp/tmp.Atx9pEvbz6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YYAAOp9UOe ++ cat /tmp/tmp.Atx9pEvbz6 ++ rm /tmp/tmp.YYAAOp9UOe /tmp/tmp.Atx9pEvbz6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 6 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Am0e75CB8L +++ mktemp ++ local LAST_ERR=/tmp/tmp.CsVfP7hWQZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Am0e75CB8L ++ cat /tmp/tmp.CsVfP7hWQZ ++ rm /tmp/tmp.Am0e75CB8L /tmp/tmp.CsVfP7hWQZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 7 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JEGvW252Zm +++ mktemp ++ local LAST_ERR=/tmp/tmp.XQ6a1nep4S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JEGvW252Zm ++ cat /tmp/tmp.XQ6a1nep4S ++ rm /tmp/tmp.JEGvW252Zm /tmp/tmp.XQ6a1nep4S ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 8 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ggX589nqtN +++ mktemp ++ local LAST_ERR=/tmp/tmp.40IL0rIRLS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ggX589nqtN ++ cat /tmp/tmp.40IL0rIRLS ++ rm /tmp/tmp.ggX589nqtN /tmp/tmp.40IL0rIRLS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 9 -ge 32 ']' + echo -n . .+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MgTguAaDFo +++ mktemp ++ local LAST_ERR=/tmp/tmp.1q4YwZIPZr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MgTguAaDFo ++ cat /tmp/tmp.1q4YwZIPZr ++ rm /tmp/tmp.MgTguAaDFo /tmp/tmp.1q4YwZIPZr ++ return 0 + [[ initializing == \r\e\a\d\y ]] + let retry+=1 + '[' 10 -ge 32 ']' + echo -n . 
.+ sleep 10 ++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IUg8KlehZN +++ mktemp ++ local LAST_ERR=/tmp/tmp.9lXSuW1WJf ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.IUg8KlehZN ++ cat /tmp/tmp.9lXSuW1WJf ++ rm /tmp/tmp.IUg8KlehZN /tmp/tmp.9lXSuW1WJf ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100503 })' myApp:myPass@some-name-mongos.expose-sharded-9120 + local 'command=use myApp\n db.test.insert({ x: 100503 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mpNKKN6ZnR +++ mktemp ++ local LAST_ERR=/tmp/tmp.lDfPRxMLeJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mpNKKN6ZnR ++ cat /tmp/tmp.lDfPRxMLeJ ++ rm /tmp/tmp.mpNKKN6ZnR /tmp/tmp.lDfPRxMLeJ ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.PofMqUSSLu ++ mktemp + local LAST_ERR=/tmp/tmp.tLReG8lT7t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100503 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PofMqUSSLu Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("d7fc9b42-3853-4244-8adc-dcf5ea295289") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.tLReG8lT7t + rm /tmp/tmp.PofMqUSSLu /tmp/tmp.tLReG8lT7t + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-9120 -4nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-23T14:18:04+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-9120 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + 
local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iAetdiANdT +++ mktemp ++ local LAST_ERR=/tmp/tmp.JzAabqMqkm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iAetdiANdT ++ cat /tmp/tmp.JzAabqMqkm ++ rm /tmp/tmp.iAetdiANdT /tmp/tmp.JzAabqMqkm ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.Iqd8QdVsFb ++ mktemp + local LAST_ERR=/tmp/tmp.dmyo6LJlZz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Iqd8QdVsFb + cat /tmp/tmp.dmyo6LJlZz + rm /tmp/tmp.Iqd8QdVsFb /tmp/tmp.dmyo6LJlZz + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.kMkliBXMM6/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:18:08+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.TMeYKNckTN +++ mktemp ++ local 
LAST_ERR=/tmp/tmp.V8g5a8H971 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TMeYKNckTN ++ cat /tmp/tmp.V8g5a8H971 ++ rm /tmp/tmp.TMeYKNckTN /tmp/tmp.V8g5a8H971 ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.Qr4BZlid1k ++ mktemp + local LAST_ERR=/tmp/tmp.mTTrgiOg0N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Qr4BZlid1k + cat /tmp/tmp.mTTrgiOg0N + rm /tmp/tmp.Qr4BZlid1k /tmp/tmp.mTTrgiOg0N + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.kMkliBXMM6/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:18:10+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 == *cfg* ]] + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.iOTGQ6aEle +++ mktemp ++ local LAST_ERR=/tmp/tmp.k3reOqO9yc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iOTGQ6aEle ++ cat /tmp/tmp.k3reOqO9yc ++ rm /tmp/tmp.iOTGQ6aEle /tmp/tmp.k3reOqO9yc ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo 
mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.CqmtEefqvQ ++ mktemp + local LAST_ERR=/tmp/tmp.M1wp3azKVu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.CqmtEefqvQ + cat /tmp/tmp.M1wp3azKVu + rm /tmp/tmp.CqmtEefqvQ /tmp/tmp.M1wp3azKVu + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.kMkliBXMM6/find-4nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 -4nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + local postfix=-4nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:18:12+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 == *cfg* ]] ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.t47NXNrogs +++ mktemp ++ local LAST_ERR=/tmp/tmp.CWe9IeJSmg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.t47NXNrogs ++ cat /tmp/tmp.CWe9IeJSmg ++ rm /tmp/tmp.t47NXNrogs /tmp/tmp.CWe9IeJSmg ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.SmWKJopRZd ++ mktemp + local LAST_ERR=/tmp/tmp.PzxnnxStK5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SmWKJopRZd + cat 
/tmp/tmp.PzxnnxStK5 + rm /tmp/tmp.SmWKJopRZd /tmp/tmp.PzxnnxStK5 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-4nd.json /tmp/tmp.kMkliBXMM6/find-4nd + desc 'Exposed, LoadBalancer -> ClusterIP' + set +o xtrace ----------------------------------------------------------------------------------- Exposed, LoadBalancer -> ClusterIP ----------------------------------------------------------------------------------- + expose_cluster ClusterIP + expose_type=ClusterIP + expose_status=true + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.axRa9kBgL0 ++ mktemp + local LAST_ERR=/tmp/tmp.xpgruUEY1A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": true, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": true, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.axRa9kBgL0 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.xpgruUEY1A + rm /tmp/tmp.axRa9kBgL0 /tmp/tmp.xpgruUEY1A + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zXV4WOMvoK +++ mktemp ++ local LAST_ERR=/tmp/tmp.ykpJyuIzMP ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zXV4WOMvoK ++ cat /tmp/tmp.ykpJyuIzMP ++ rm /tmp/tmp.zXV4WOMvoK /tmp/tmp.ykpJyuIzMP ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NU3sW3NBRi +++ mktemp ++ local LAST_ERR=/tmp/tmp.h13kLWu29U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NU3sW3NBRi ++ cat /tmp/tmp.h13kLWu29U ++ rm 
/tmp/tmp.NU3sW3NBRi /tmp/tmp.h13kLWu29U ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EhydZTO0eD +++ mktemp ++ local LAST_ERR=/tmp/tmp.ByQ2i7rKMU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EhydZTO0eD ++ cat /tmp/tmp.ByQ2i7rKMU ++ rm /tmp/tmp.EhydZTO0eD /tmp/tmp.ByQ2i7rKMU ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YgZxBV3kbl +++ mktemp ++ local LAST_ERR=/tmp/tmp.kh344RayGt ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YgZxBV3kbl ++ cat /tmp/tmp.kh344RayGt ++ rm /tmp/tmp.YgZxBV3kbl /tmp/tmp.kh344RayGt ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qqNAQmpzhd +++ mktemp ++ local LAST_ERR=/tmp/tmp.o9M0zXET9h ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qqNAQmpzhd ++ cat /tmp/tmp.o9M0zXET9h ++ rm /tmp/tmp.qqNAQmpzhd /tmp/tmp.o9M0zXET9h ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1qazQdb9eX +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qa53zCc3UR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1qazQdb9eX ++ cat /tmp/tmp.Qa53zCc3UR ++ rm /tmp/tmp.1qazQdb9eX /tmp/tmp.Qa53zCc3UR ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local 
pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iYSHcD7Smd +++ mktemp ++ local LAST_ERR=/tmp/tmp.84O0o26jU7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iYSHcD7Smd ++ cat /tmp/tmp.84O0o26jU7 ++ rm /tmp/tmp.iYSHcD7Smd /tmp/tmp.84O0o26jU7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ir2AmxMD2f +++ mktemp ++ local LAST_ERR=/tmp/tmp.lUZdxF7Acy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ir2AmxMD2f ++ cat /tmp/tmp.lUZdxF7Acy ++ rm /tmp/tmp.ir2AmxMD2f /tmp/tmp.lUZdxF7Acy ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.wrZgsfFGjR +++ mktemp ++ local LAST_ERR=/tmp/tmp.QMrONbV81E ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.wrZgsfFGjR ++ cat /tmp/tmp.QMrONbV81E ++ rm /tmp/tmp.wrZgsfFGjR /tmp/tmp.QMrONbV81E ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XXewcC3koA +++ mktemp ++ local LAST_ERR=/tmp/tmp.v3oAQqSK7Z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XXewcC3koA ++ cat /tmp/tmp.v3oAQqSK7Z ++ rm /tmp/tmp.XXewcC3koA /tmp/tmp.v3oAQqSK7Z ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100504 })' myApp:myPass@some-name-mongos.expose-sharded-9120 + local 'command=use myApp\n db.test.insert({ x: 100504 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.RiY8rDAD8W +++ mktemp ++ local LAST_ERR=/tmp/tmp.ibpvdPxiQD ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RiY8rDAD8W ++ cat /tmp/tmp.ibpvdPxiQD ++ rm /tmp/tmp.RiY8rDAD8W /tmp/tmp.ibpvdPxiQD ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.G2JU1Gn8aP ++ mktemp + local LAST_ERR=/tmp/tmp.iNsnOxvAoF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100504 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.G2JU1Gn8aP Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("fea4f45d-675b-4727-8769-a422fa58f6d8") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.iNsnOxvAoF + rm /tmp/tmp.G2JU1Gn8aP /tmp/tmp.iNsnOxvAoF + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-9120 -5nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-23T14:19:19+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-9120 mongodb '' '' 27017 + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo egrep: warning: egrep is obsolescent; using grep -E ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k3NlsEELKJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.exIoOFs0j2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k3NlsEELKJ ++ cat /tmp/tmp.exIoOFs0j2 ++ rm /tmp/tmp.k3NlsEELKJ /tmp/tmp.exIoOFs0j2 ++ return 0 + local 
client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.K7ucDZvgsE ++ mktemp + local LAST_ERR=/tmp/tmp.0PkVQEhgQq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.K7ucDZvgsE + cat /tmp/tmp.0PkVQEhgQq + rm /tmp/tmp.K7ucDZvgsE /tmp/tmp.0PkVQEhgQq + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.kMkliBXMM6/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:19:22+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 == *cfg* ]] + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.4tYtIT5C9J +++ mktemp ++ local LAST_ERR=/tmp/tmp.S03G9IJ1f0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4tYtIT5C9J ++ cat /tmp/tmp.S03G9IJ1f0 ++ rm /tmp/tmp.4tYtIT5C9J /tmp/tmp.S03G9IJ1f0 ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.7IW8fUC5XM ++ mktemp + local LAST_ERR=/tmp/tmp.eggo4ugiSs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + 
set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7IW8fUC5XM + cat /tmp/tmp.eggo4ugiSs + rm /tmp/tmp.7IW8fUC5XM /tmp/tmp.eggo4ugiSs + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.kMkliBXMM6/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:19:25+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 == *cfg* ]] + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K0Plb9MZ9k egrep: warning: egrep is obsolescent; using grep -E +++ mktemp ++ local LAST_ERR=/tmp/tmp.VFWueaHYs4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.K0Plb9MZ9k ++ cat /tmp/tmp.VFWueaHYs4 ++ rm /tmp/tmp.K0Plb9MZ9k /tmp/tmp.VFWueaHYs4 ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.21xl777k0U ++ mktemp + local LAST_ERR=/tmp/tmp.vsQuVdHerw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.21xl777k0U + cat /tmp/tmp.vsQuVdHerw + rm /tmp/tmp.21xl777k0U /tmp/tmp.vsQuVdHerw + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.kMkliBXMM6/find-5nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 -5nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + local postfix=-5nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 
'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:19:28+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.vcv5GXXVWT +++ mktemp ++ local LAST_ERR=/tmp/tmp.Qqo9ak43Ni ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vcv5GXXVWT ++ cat /tmp/tmp.Qqo9ak43Ni ++ rm /tmp/tmp.vcv5GXXVWT /tmp/tmp.Qqo9ak43Ni ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.tiNp1lms4d ++ mktemp + local LAST_ERR=/tmp/tmp.erVnScTk0A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tiNp1lms4d + cat /tmp/tmp.erVnScTk0A + rm /tmp/tmp.tiNp1lms4d /tmp/tmp.erVnScTk0A + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-5nd.json /tmp/tmp.kMkliBXMM6/find-5nd + desc 'Exposed -> Unexposed' + set +o xtrace ----------------------------------------------------------------------------------- Exposed -> Unexposed ----------------------------------------------------------------------------------- + expose_cluster ClusterIP false + expose_type=ClusterIP + expose_status=false + kubectl_bin patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": "/spec/replsets/0/expose", "value": { "enabled": false, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "type" : "ClusterIP" } }]' ++ mktemp + local LAST_OUT=/tmp/tmp.diZmKmr0X6 ++ mktemp + local LAST_ERR=/tmp/tmp.ZTD6GabmW2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb some-name --type=json --patch '[ { "op": "replace", "path": 
"/spec/replsets/0/expose", "value": { "enabled": false, "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/mongos/expose", "value": { "type" : "ClusterIP" } }, { "op": "replace", "path": "/spec/sharding/configsvrReplSet/expose", "value": { "enabled": false, "type" : "ClusterIP" } }]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.diZmKmr0X6 perconaservermongodb.psmdb.percona.com/some-name patched + cat /tmp/tmp.ZTD6GabmW2 + rm /tmp/tmp.diZmKmr0X6 /tmp/tmp.ZTD6GabmW2 + return 0 + wait_for_running some-name-rs0 3 + local name=some-name-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-rs0-0 + local pod=some-name-rs0-0 + set +o xtrace waiting for pod/some-name-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-rs0-1 + local pod=some-name-rs0-1 + set +o xtrace waiting for pod/some-name-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.N5Jad2NN16 +++ mktemp ++ local LAST_ERR=/tmp/tmp.BQ5z9MuM6g ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.N5Jad2NN16 ++ cat /tmp/tmp.BQ5z9MuM6g ++ rm /tmp/tmp.N5Jad2NN16 /tmp/tmp.BQ5z9MuM6g ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-rs0-2 + local pod=some-name-rs0-2 + set +o xtrace waiting for pod/some-name-rs0-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.kPyswIgC7S +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y83kWt33g6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.kPyswIgC7S ++ cat /tmp/tmp.Y83kWt33g6 ++ rm /tmp/tmp.kPyswIgC7S /tmp/tmp.Y83kWt33g6 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.09wWFHQC7y +++ mktemp ++ local LAST_ERR=/tmp/tmp.2cN03Wer3R ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.09wWFHQC7y ++ cat /tmp/tmp.2cN03Wer3R ++ rm /tmp/tmp.09wWFHQC7y /tmp/tmp.2cN03Wer3R ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_for_running some-name-cfg 3 false + local name=some-name-cfg + let last_pod=2 + local check_cluster_readyness=false + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=cfg + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-cfg-0 + local pod=some-name-cfg-0 + set +o xtrace waiting for pod/some-name-cfg-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-cfg-1 + local pod=some-name-cfg-1 + 
set +o xtrace waiting for pod/some-name-cfg-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KpCcYbjFOy +++ mktemp ++ local LAST_ERR=/tmp/tmp.ygHZSW5n6G ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KpCcYbjFOy ++ cat /tmp/tmp.ygHZSW5n6G ++ rm /tmp/tmp.KpCcYbjFOy /tmp/tmp.ygHZSW5n6G ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-cfg-2 + local pod=some-name-cfg-2 + set +o xtrace waiting for pod/some-name-cfg-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uOqNt4UupX +++ mktemp ++ local LAST_ERR=/tmp/tmp.haqZ6W1kN8 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uOqNt4UupX ++ cat /tmp/tmp.haqZ6W1kN8 ++ rm /tmp/tmp.uOqNt4UupX /tmp/tmp.haqZ6W1kN8 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FEqKtNEo4W +++ mktemp ++ local LAST_ERR=/tmp/tmp.T6ttZPOx5k ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="cfg")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FEqKtNEo4W ++ cat /tmp/tmp.T6ttZPOx5k ++ rm /tmp/tmp.FEqKtNEo4W /tmp/tmp.T6ttZPOx5k ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ false == \t\r\u\e ]] + wait_for_running some-name-mongos 3 + local name=some-name-mongos + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=mongos + local cluster_name=some-name ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod some-name-mongos-0 + local pod=some-name-mongos-0 + set +o xtrace waiting for pod/some-name-mongos-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod some-name-mongos-1 + local pod=some-name-mongos-1 + set +o xtrace waiting for pod/some-name-mongos-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qW1Qc4ZhOp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Nv8OUcTxXp ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.qW1Qc4ZhOp ++ cat /tmp/tmp.Nv8OUcTxXp ++ rm /tmp/tmp.qW1Qc4ZhOp /tmp/tmp.Nv8OUcTxXp ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod some-name-mongos-2 + local pod=some-name-mongos-2 + set +o xtrace waiting for pod/some-name-mongos-2 to be ready.OK ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iiACzez6J6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.rpedDdbGKx ++ local 
exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iiACzez6J6 ++ cat /tmp/tmp.rpedDdbGKx ++ rm /tmp/tmp.iiACzez6J6 /tmp/tmp.rpedDdbGKx ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.chuuKxtpDT +++ mktemp ++ local LAST_ERR=/tmp/tmp.slaVx3s2gn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.spec.replsets[?(@.name=="mongos")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.chuuKxtpDT ++ cat /tmp/tmp.slaVx3s2gn ++ rm /tmp/tmp.chuuKxtpDT /tmp/tmp.slaVx3s2gn ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness + wait_cluster_consistency some-name + local cluster_name=some-name + local wait_time=32 + retry=0 + sleep 7 + echo -n 'waiting for cluster readyness' waiting for cluster readyness++ kubectl_bin get psmdb some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.o8zwJwTZ66 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mUMiY4HSQm ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.o8zwJwTZ66 ++ cat /tmp/tmp.mUMiY4HSQm ++ rm /tmp/tmp.o8zwJwTZ66 /tmp/tmp.mUMiY4HSQm ++ return 0 + [[ ready == \r\e\a\d\y ]] + echo .OK .OK + run_mongos 'use myApp\n db.test.insert({ x: 100505 })' myApp:myPass@some-name-mongos.expose-sharded-9120 + local 'command=use myApp\n db.test.insert({ x: 100505 })' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sBswpVcpsh +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y764nSFv2C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sBswpVcpsh ++ cat /tmp/tmp.Y764nSFv2C ++ rm /tmp/tmp.sBswpVcpsh /tmp/tmp.Y764nSFv2C ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.FqELqKay4N ++ mktemp + local LAST_ERR=/tmp/tmp.iPfWusJDKo + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100505 })\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FqELqKay4N Percona Server for MongoDB shell version 
v4.4.29-28 connecting to: mongodb://some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("a2521aa9-e6cd-42f4-98ad-1d4b8019925c") } Percona Server for MongoDB server version: v8.0.16-5 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.iPfWusJDKo + rm /tmp/tmp.FqELqKay4N /tmp/tmp.iPfWusJDKo + return 0 + compare_mongos_cmd find myApp:myPass@some-name-mongos.expose-sharded-9120 -6nd + local command=find + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local port=27017 + local tls=false + [[ false == \t\r\u\e ]] + mongos_command=run_mongos + log 'running db.test.command() in myApp' + set +o xtrace [2025-12-23T14:20:36+0000] running db.test.command() in myApp + run_mongos 'use myApp\n db.test.find()' myApp:myPass@some-name-mongos.expose-sharded-9120 mongodb '' '' 27017 + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-mongos.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local port=27017 + local mongo_bin=mongo + egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ echo .svc.cluster.local ++ awk -F: '{print $2}' egrep: warning: egrep is obsolescent; using grep -E + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27017 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Q3L14VdXKD +++ mktemp ++ local LAST_ERR=/tmp/tmp.gVE1MwHMjE ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Q3L14VdXKD ++ cat /tmp/tmp.gVE1MwHMjE ++ rm /tmp/tmp.Q3L14VdXKD /tmp/tmp.gVE1MwHMjE ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' ++ mktemp + local LAST_OUT=/tmp/tmp.LPqpHW40KK ++ mktemp + local LAST_ERR=/tmp/tmp.W0Tr2BGVJP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-mongos.expose-sharded-9120.svc.cluster.local:27017/admin ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LPqpHW40KK + cat /tmp/tmp.W0Tr2BGVJP + rm /tmp/tmp.LPqpHW40KK /tmp/tmp.W0Tr2BGVJP + return 0 + [[ 0 -eq 0 ]] + diff /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.kMkliBXMM6/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local 
tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:20:38+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local uri=myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.WTinYpxiuj +++ mktemp ++ local LAST_ERR=/tmp/tmp.d5xSDmXvbd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WTinYpxiuj ++ cat /tmp/tmp.d5xSDmXvbd ++ rm /tmp/tmp.WTinYpxiuj /tmp/tmp.d5xSDmXvbd ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.EpJTn6VCUf ++ mktemp + local LAST_ERR=/tmp/tmp.F03bsPVAMR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-0.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EpJTn6VCUf + cat /tmp/tmp.F03bsPVAMR + rm /tmp/tmp.EpJTn6VCUf /tmp/tmp.F03bsPVAMR + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.kMkliBXMM6/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:20:41+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 mongodb '' '' + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local 'command=use 
myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.ZsCN9Mbxcl +++ mktemp ++ local LAST_ERR=/tmp/tmp.E9mxHr5Zwu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ZsCN9Mbxcl ++ cat /tmp/tmp.E9mxHr5Zwu ++ rm /tmp/tmp.ZsCN9Mbxcl /tmp/tmp.E9mxHr5Zwu ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.BPkVhNSVnC ++ mktemp + local LAST_ERR=/tmp/tmp.iHYYjKyhTk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-1.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BPkVhNSVnC + cat /tmp/tmp.iHYYjKyhTk + rm /tmp/tmp.BPkVhNSVnC /tmp/tmp.iHYYjKyhTk + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.kMkliBXMM6/find-6nd + compare_mongo_cmd find myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 -6nd + local command=find + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + local postfix=-6nd + local suffix= + local database=myApp + local collection=test + local sort= + local tls=false + local replicaset= + local 'full_command=db.test.find()' + [[ -n '' ]] + log 'running db.test.find() in myApp' + set +o xtrace [2025-12-23T14:20:44+0000] running db.test.find() in myApp + [[ false == \t\r\u\e ]] + mongo_command=run_mongo + run_mongo 'use myApp\n db.test.find()' myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 mongodb '' '' + local 'command=use myApp\n db.test.find()' + local uri=myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 + egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|Started a new thread for the timer service' + local driver=mongodb + local suffix=.svc.cluster.local + local mongo_flag= + local replica_set=rs0 + [[ myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120 == *cfg* ]] + /usr/sbin/sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/' ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp egrep: warning: egrep is obsolescent; using grep -E ++ local LAST_OUT=/tmp/tmp.e2T42rQTOd +++ mktemp ++ local LAST_ERR=/tmp/tmp.YrBVK5cXKJ ++ local exit_status=0 ++ local timeout=4 +++ 
seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e2T42rQTOd ++ cat /tmp/tmp.YrBVK5cXKJ ++ rm /tmp/tmp.e2T42rQTOd /tmp/tmp.YrBVK5cXKJ ++ return 0 + local client_container=psmdb-client-696897d69b-lr5sx + kubectl_bin exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' ++ mktemp + local LAST_OUT=/tmp/tmp.azBVbJbSIp ++ mktemp + local LAST_ERR=/tmp/tmp.QRHQTMwm8q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-696897d69b-lr5sx -- bash -c 'printf '\''use myApp\n db.test.find()\n'\'' | mongo mongodb://myApp:myPass@some-name-rs0-2.some-name-rs0.expose-sharded-9120.svc.cluster.local/admin?ssl=false\&replicaSet=rs0 ' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.azBVbJbSIp + cat /tmp/tmp.QRHQTMwm8q + rm /tmp/tmp.azBVbJbSIp /tmp/tmp.QRHQTMwm8q + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/expose-sharded/compare/find-6nd.json /tmp/tmp.kMkliBXMM6/find-6nd + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/container-rc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.b2knlkCIAV ++ mktemp + local LAST_ERR=/tmp/tmp.40qoeBMMas + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/e2e-tests/conf/container-rc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b2knlkCIAV runtimeclass.node.k8s.io "container-rc" deleted + cat /tmp/tmp.40qoeBMMas + rm /tmp/tmp.b2knlkCIAV /tmp/tmp.40qoeBMMas + return 0 + destroy expose-sharded-9120 + local namespace=expose-sharded-9120 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.xGdx3hhtvy +++ mktemp ++ local LAST_ERR=/tmp/tmp.4mba2si0kg ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.xGdx3hhtvy ++ cat /tmp/tmp.4mba2si0kg No resources found in expose-sharded-9120 namespace. 
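The three compare_mongo_cmd passes above follow a single pattern: run db.test.find() against one replica-set member at a time (some-name-rs0-0, -1, -2) from inside the psmdb-client pod, strip shell and driver noise, blank out ObjectId values and the numeric pod ordinal, and diff the result against the expected find-6nd.json. Below is a minimal stand-alone sketch of that pattern, not the suite's own compare_mongo_cmd/run_mongo helpers; the function name verify_member and the variable names are illustrative, and it assumes the legacy mongo shell is available in the psmdb-client image, as it is in this run.

#!/bin/bash
# Sketch only: re-query each rs0 member and compare against the expected output.
set -eu

NAMESPACE=expose-sharded-9120
EXPECTED=e2e-tests/expose-sharded/compare/find-6nd.json   # expected result checked into the repo
CLIENT_POD=$(kubectl -n "$NAMESPACE" get pods --selector=name=psmdb-client \
  -o jsonpath='{.items[0].metadata.name}')

verify_member() {                                          # hypothetical helper name
  local host=$1                                            # e.g. some-name-rs0-0.some-name-rs0.<ns>
  kubectl -n "$NAMESPACE" exec "$CLIENT_POD" -- bash -c \
    "printf 'use myApp\n db.test.find()\n' | mongo \"mongodb://myApp:myPass@${host}.svc.cluster.local/admin?ssl=false&replicaSet=rs0\"" \
    | grep -Ev 'I NETWORK|W NETWORK|F NETWORK|Implicit session:|Percona Server for MongoDB|connecting to:' \
    | sed -E 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+\.svc/-xxx.svc/' \
    > "/tmp/find-6nd.${host%%.*}"
  diff -u "$EXPECTED" "/tmp/find-6nd.${host%%.*}"          # any difference fails the check
}

for i in 0 1 2; do
  verify_member "some-name-rs0-${i}.some-name-rs0.${NAMESPACE}"
done

The sed normalization is what keeps the diff stable across runs: ObjectId values and the pod ordinal in the connection banner change every time, so they are removed before comparing with the stored find-6nd.json.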
++ rm /tmp/tmp.xGdx3hhtvy /tmp/tmp.4mba2si0kg ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.xZ4mToMciE ++ mktemp + local LAST_ERR=/tmp/tmp.EiLHWdHydZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xZ4mToMciE customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.EiLHWdHydZ + rm /tmp/tmp.xZ4mToMciE /tmp/tmp.EiLHWdHydZ + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.iezaze0MLw ++ mktemp + local LAST_ERR=/tmp/tmp.Wq73YNFHri + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iezaze0MLw + cat /tmp/tmp.Wq73YNFHri + rm /tmp/tmp.iezaze0MLw /tmp/tmp.Wq73YNFHri + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.GUnKKTxmmW ++ mktemp + local LAST_ERR=/tmp/tmp.u2gouq1gx0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.GUnKKTxmmW + cat /tmp/tmp.u2gouq1gx0 + rm /tmp/tmp.GUnKKTxmmW /tmp/tmp.u2gouq1gx0 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.Ay2Y9hOQsG ++ mktemp + local LAST_ERR=/tmp/tmp.BAAqGkfw23 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ay2Y9hOQsG + cat /tmp/tmp.BAAqGkfw23 + rm /tmp/tmp.Ay2Y9hOQsG /tmp/tmp.BAAqGkfw23 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.3pvxXEFvuL ++ mktemp + local LAST_ERR=/tmp/tmp.dJ1L7opGB4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2158/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3pvxXEFvuL clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.dJ1L7opGB4 + rm /tmp/tmp.3pvxXEFvuL /tmp/tmp.dJ1L7opGB4 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.w05vZ5TSr0 ++ mktemp + local LAST_ERR=/tmp/tmp.kAJ9BZ8EqO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.w05vZ5TSr0 + cat /tmp/tmp.kAJ9BZ8EqO Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.w05vZ5TSr0 + cat /tmp/tmp.kAJ9BZ8EqO Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from 
server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error 
when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.w05vZ5TSr0 + cat 
/tmp/tmp.kAJ9BZ8EqO Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when 
deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.w05vZ5TSr0 + cat /tmp/tmp.kAJ9BZ8EqO Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.w05vZ5TSr0 /tmp/tmp.kAJ9BZ8EqO + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace expose-sharded-9120 + rm -rf /tmp/tmp.kMkliBXMM6 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.N0JqzVThSf ++ mktemp + local LAST_OUT=/tmp/tmp.P2voqbLLWT ++ mktemp + local LAST_ERR=/tmp/tmp.zqUt5lRWfm + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.4KYicT7ch7 + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace expose-sharded-9120